=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-500607 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-500607 --driver=docker --container-runtime=containerd: (24.132146025s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-500607"
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-cALoxAxuLePD/agent.116405" SSH_AGENT_PID="116406" DOCKER_HOST=ssh://docker@127.0.0.1:32774 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-cALoxAxuLePD/agent.116405" SSH_AGENT_PID="116406" DOCKER_HOST=ssh://docker@127.0.0.1:32774 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Done: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-cALoxAxuLePD/agent.116405" SSH_AGENT_PID="116406" DOCKER_HOST=ssh://docker@127.0.0.1:32774 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": (2.018879377s)
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-cALoxAxuLePD/agent.116405" SSH_AGENT_PID="116406" DOCKER_HOST=ssh://docker@127.0.0.1:32774 docker image ls"
docker_test.go:250: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-cALoxAxuLePD/agent.116405" SSH_AGENT_PID="116406" DOCKER_HOST=ssh://docker@127.0.0.1:32774 docker image ls": exit status 1 (538.498108ms)
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
docker_test.go:252: failed to execute 'docker image ls', error: exit status 1, output:
** stderr **
error during connect: Get "http://docker.example.com/v1.43/images/json": EOF
** /stderr **
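(The "docker.example.com" in the error above appears to be the placeholder host name the docker CLI substitutes when it tunnels the Engine API over SSH, so the EOF points at the SSH-backed connection to the daemon dropping, not at a DNS failure. A minimal sketch for replaying the failing step by hand, assuming the same profile name and mapped port as this run:

    # re-emit the env that docker-env printed above, then retry the failing call
    eval "$(out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-500607)"
    docker version    # handshakes over DOCKER_HOST=ssh://docker@127.0.0.1:32774
    docker image ls   # the call that returned EOF in this run
)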
panic.go:636: *** TestDockerEnvContainerd FAILED at 2025-09-04 05:52:23.454054663 +0000 UTC m=+377.437325319
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-500607
helpers_test.go:243: (dbg) docker inspect dockerenv-500607:
-- stdout --
[
    {
        "Id": "4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d",
        "Created": "2025-09-04T05:51:50.889352833Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 113530,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2025-09-04T05:51:50.926121158Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:6f7d8b3ae805e64eb4efe058a75d43d384fe5989473cee7f8e24ea90eca28309",
        "ResolvConfPath": "/var/lib/docker/containers/4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d/hostname",
        "HostsPath": "/var/lib/docker/containers/4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d/hosts",
        "LogPath": "/var/lib/docker/containers/4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d/4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d-json.log",
        "Name": "/dockerenv-500607",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "/lib/modules:/lib/modules:ro",
                "dockerenv-500607:/var"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {
                    "max-size": "100m"
                }
            },
            "NetworkMode": "dockerenv-500607",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "ConsoleSize": [
                0,
                0
            ],
            "CapAdd": null,
            "CapDrop": null,
            "CgroupnsMode": "host",
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 8388608000,
            "NanoCpus": 2000000000,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": [],
            "BlkioDeviceWriteBps": [],
            "BlkioDeviceReadIOps": [],
            "BlkioDeviceWriteIOps": [],
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "MemoryReservation": 0,
            "MemorySwap": 16777216000,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": [],
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "ID": "4b21d92236d18c95be749392a556d15dda7bcd0df358508a11d841484406631d",
                "LowerDir": "/var/lib/docker/overlay2/dc2c418eca76e1a0757b50b702204304b556e20b330ae5e689fe0db0b33d3f5d-init/diff:/var/lib/docker/overlay2/7b184c25de34bb1724d01b8710b38de5a5236a323a1b29cf9c2746e97f23d725/diff",
                "MergedDir": "/var/lib/docker/overlay2/dc2c418eca76e1a0757b50b702204304b556e20b330ae5e689fe0db0b33d3f5d/merged",
                "UpperDir": "/var/lib/docker/overlay2/dc2c418eca76e1a0757b50b702204304b556e20b330ae5e689fe0db0b33d3f5d/diff",
                "WorkDir": "/var/lib/docker/overlay2/dc2c418eca76e1a0757b50b702204304b556e20b330ae5e689fe0db0b33d3f5d/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            },
            {
                "Type": "volume",
                "Name": "dockerenv-500607",
                "Source": "/var/lib/docker/volumes/dockerenv-500607/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "dockerenv-500607",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "32443/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc",
            "Volumes": null,
            "WorkingDir": "/",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "OnBuild": null,
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "dockerenv-500607",
                "name.minikube.sigs.k8s.io": "dockerenv-500607",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "815d38506c62d4d7924ac8d9344f4f94c463ce57055e8907ce60cf41d98ed9c0",
            "SandboxKey": "/var/run/docker/netns/815d38506c62",
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32774"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32775"
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32778"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32776"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32777"
                    }
                ]
            },
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "",
            "Gateway": "",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "",
            "IPPrefixLen": 0,
            "IPv6Gateway": "",
            "MacAddress": "",
            "Networks": {
                "dockerenv-500607": {
                    "IPAMConfig": {
                        "IPv4Address": "192.168.49.2"
                    },
                    "Links": null,
                    "Aliases": null,
                    "MacAddress": "3e:aa:01:96:79:8b",
                    "DriverOpts": null,
                    "GwPriority": 0,
                    "NetworkID": "fcb7512a5fb7c143d33b61a182e668c90e5c7e5b9a17905f83d3003921b8f355",
                    "EndpointID": "4475e5677a4c88f5d602dcad620d037140f7558c9381fddacda798604b1cec2d",
                    "Gateway": "192.168.49.1",
                    "IPAddress": "192.168.49.2",
                    "IPPrefixLen": 24,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "DNSNames": [
                        "dockerenv-500607",
                        "4b21d92236d1"
                    ]
                }
            }
        }
    }
]
-- /stdout --
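(The host-port mappings captured under NetworkSettings.Ports above can be read back directly with docker port; a quick sketch against this run's container name:

    docker port dockerenv-500607 22/tcp     # expected: 127.0.0.1:32774, the SSH endpoint docker-env pointed at
    docker port dockerenv-500607 8443/tcp   # expected: 127.0.0.1:32777, the apiserver mapping
)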
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-500607 -n dockerenv-500607
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-500607 logs -n 25
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ addons │ addons-060123 addons disable registry-creds --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ ssh │ addons-060123 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ ip │ addons-060123 ip │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ addons │ addons-060123 addons disable ingress-dns --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ addons │ addons-060123 addons disable ingress --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:51 UTC │
│ ip │ addons-060123 ip │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ addons │ addons-060123 addons disable registry --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:50 UTC │ 04 Sep 25 05:50 UTC │
│ ssh │ addons-060123 ssh cat /opt/local-path-provisioner/pvc-6c4033e8-4c75-46c3-ac76-96ab6c7a043d_default_test-pvc/file1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable yakd --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable metrics-server --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ enable headlamp -p addons-060123 --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ addons-060123 addons disable headlamp --alsologtostderr -v=1 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ stop │ -p addons-060123 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ enable dashboard -p addons-060123 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ disable dashboard -p addons-060123 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ addons │ disable gvisor -p addons-060123 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ delete │ -p addons-060123 │ addons-060123 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:51 UTC │
│ start │ -p dockerenv-500607 --driver=docker --container-runtime=containerd │ dockerenv-500607 │ jenkins │ v1.36.0 │ 04 Sep 25 05:51 UTC │ 04 Sep 25 05:52 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-500607 │ dockerenv-500607 │ jenkins │ v1.36.0 │ 04 Sep 25 05:52 UTC │ 04 Sep 25 05:52 UTC │
└────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/04 05:51:45
Running on machine: ubuntu-20-agent-8
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0904 05:51:45.636925 112991 out.go:360] Setting OutFile to fd 1 ...
I0904 05:51:45.637176 112991 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 05:51:45.637180 112991 out.go:374] Setting ErrFile to fd 2...
I0904 05:51:45.637183 112991 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0904 05:51:45.637401 112991 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21409-85310/.minikube/bin
I0904 05:51:45.637966 112991 out.go:368] Setting JSON to false
I0904 05:51:45.638836 112991 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-8","uptime":9247,"bootTime":1756955859,"procs":183,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1083-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0904 05:51:45.638944 112991 start.go:140] virtualization: kvm guest
I0904 05:51:45.641348 112991 out.go:179] * [dockerenv-500607] minikube v1.36.0 on Ubuntu 20.04 (kvm/amd64)
I0904 05:51:45.642852 112991 out.go:179] - MINIKUBE_LOCATION=21409
I0904 05:51:45.642852 112991 notify.go:220] Checking for updates...
I0904 05:51:45.644329 112991 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0904 05:51:45.645843 112991 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21409-85310/kubeconfig
I0904 05:51:45.647212 112991 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21409-85310/.minikube
I0904 05:51:45.648649 112991 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0904 05:51:45.649934 112991 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0904 05:51:45.651752 112991 driver.go:421] Setting default libvirt URI to qemu:///system
I0904 05:51:45.674711 112991 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I0904 05:51:45.674798 112991 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 05:51:45.725285 112991 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-09-04 05:51:45.715880143 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647988736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0904 05:51:45.725389 112991 docker.go:318] overlay module found
I0904 05:51:45.727368 112991 out.go:179] * Using the docker driver based on user configuration
I0904 05:51:45.728648 112991 start.go:304] selected driver: docker
I0904 05:51:45.728661 112991 start.go:918] validating driver "docker" against <nil>
I0904 05:51:45.728676 112991 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0904 05:51:45.728794 112991 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0904 05:51:45.779111 112991 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-09-04 05:51:45.769285544 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1083-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647988736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0904 05:51:45.779251 112991 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0904 05:51:45.779775 112991 start_flags.go:410] Using suggested 8000MB memory alloc based on sys=32089MB, container=32089MB
I0904 05:51:45.779917 112991 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I0904 05:51:45.781793 112991 out.go:179] * Using Docker driver with root privileges
I0904 05:51:45.783484 112991 cni.go:84] Creating CNI manager for ""
I0904 05:51:45.783568 112991 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 05:51:45.783578 112991 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0904 05:51:45.783669 112991 start.go:348] cluster config:
{Name:dockerenv-500607 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-500607 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 05:51:45.785167 112991 out.go:179] * Starting "dockerenv-500607" primary control-plane node in "dockerenv-500607" cluster
I0904 05:51:45.786329 112991 cache.go:123] Beginning downloading kic base image for docker with containerd
I0904 05:51:45.787662 112991 out.go:179] * Pulling base image v0.0.47-1756936034-21409 ...
I0904 05:51:45.789090 112991 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 05:51:45.789137 112991 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21409-85310/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0904 05:51:45.789144 112991 cache.go:58] Caching tarball of preloaded images
I0904 05:51:45.789214 112991 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon
I0904 05:51:45.789247 112991 preload.go:172] Found /home/jenkins/minikube-integration/21409-85310/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0904 05:51:45.789255 112991 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0904 05:51:45.789588 112991 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/config.json ...
I0904 05:51:45.789606 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/config.json: {Name:mk21220af2abb87159dce22474c68db224e44acf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:45.811050 112991 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc in local docker daemon, skipping pull
I0904 05:51:45.811062 112991 cache.go:147] gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc exists in daemon, skipping load
I0904 05:51:45.811077 112991 cache.go:232] Successfully downloaded all kic artifacts
I0904 05:51:45.811100 112991 start.go:360] acquireMachinesLock for dockerenv-500607: {Name:mkea7f22a332921aeee84d11ea697a500662f7b1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0904 05:51:45.811190 112991 start.go:364] duration metric: took 78.471µs to acquireMachinesLock for "dockerenv-500607"
I0904 05:51:45.811209 112991 start.go:93] Provisioning new machine with config: &{Name:dockerenv-500607 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-500607 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 05:51:45.811275 112991 start.go:125] createHost starting for "" (driver="docker")
I0904 05:51:45.814135 112991 out.go:252] * Creating docker container (CPUs=2, Memory=8000MB) ...
I0904 05:51:45.814353 112991 start.go:159] libmachine.API.Create for "dockerenv-500607" (driver="docker")
I0904 05:51:45.814381 112991 client.go:168] LocalClient.Create starting
I0904 05:51:45.814467 112991 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem
I0904 05:51:45.814499 112991 main.go:141] libmachine: Decoding PEM data...
I0904 05:51:45.814512 112991 main.go:141] libmachine: Parsing certificate...
I0904 05:51:45.814578 112991 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21409-85310/.minikube/certs/cert.pem
I0904 05:51:45.814592 112991 main.go:141] libmachine: Decoding PEM data...
I0904 05:51:45.814599 112991 main.go:141] libmachine: Parsing certificate...
I0904 05:51:45.814912 112991 cli_runner.go:164] Run: docker network inspect dockerenv-500607 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0904 05:51:45.831958 112991 cli_runner.go:211] docker network inspect dockerenv-500607 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0904 05:51:45.832025 112991 network_create.go:284] running [docker network inspect dockerenv-500607] to gather additional debugging logs...
I0904 05:51:45.832041 112991 cli_runner.go:164] Run: docker network inspect dockerenv-500607
W0904 05:51:45.849070 112991 cli_runner.go:211] docker network inspect dockerenv-500607 returned with exit code 1
I0904 05:51:45.849095 112991 network_create.go:287] error running [docker network inspect dockerenv-500607]: docker network inspect dockerenv-500607: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-500607 not found
I0904 05:51:45.849109 112991 network_create.go:289] output of [docker network inspect dockerenv-500607]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-500607 not found
** /stderr **
I0904 05:51:45.849226 112991 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 05:51:45.867466 112991 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001bd6450}
I0904 05:51:45.867499 112991 network_create.go:124] attempt to create docker network dockerenv-500607 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0904 05:51:45.867556 112991 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-500607 dockerenv-500607
I0904 05:51:45.920566 112991 network_create.go:108] docker network dockerenv-500607 192.168.49.0/24 created
I0904 05:51:45.920590 112991 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-500607" container
I0904 05:51:45.920658 112991 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0904 05:51:45.938089 112991 cli_runner.go:164] Run: docker volume create dockerenv-500607 --label name.minikube.sigs.k8s.io=dockerenv-500607 --label created_by.minikube.sigs.k8s.io=true
I0904 05:51:45.956488 112991 oci.go:103] Successfully created a docker volume dockerenv-500607
I0904 05:51:45.956564 112991 cli_runner.go:164] Run: docker run --rm --name dockerenv-500607-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-500607 --entrypoint /usr/bin/test -v dockerenv-500607:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -d /var/lib
I0904 05:51:46.413826 112991 oci.go:107] Successfully prepared a docker volume dockerenv-500607
I0904 05:51:46.413866 112991 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 05:51:46.413891 112991 kic.go:194] Starting extracting preloaded images to volume ...
I0904 05:51:46.413957 112991 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-85310/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-500607:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir
I0904 05:51:50.824699 112991 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21409-85310/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-500607:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc -I lz4 -xf /preloaded.tar -C /extractDir: (4.41069542s)
I0904 05:51:50.824723 112991 kic.go:203] duration metric: took 4.410829025s to extract preloaded images to volume ...
W0904 05:51:50.825038 112991 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0904 05:51:50.825125 112991 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0904 05:51:50.873732 112991 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-500607 --name dockerenv-500607 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-500607 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-500607 --network dockerenv-500607 --ip 192.168.49.2 --volume dockerenv-500607:/var --security-opt apparmor=unconfined --memory=8000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc
I0904 05:51:51.147478 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Running}}
I0904 05:51:51.167719 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:51:51.186553 112991 cli_runner.go:164] Run: docker exec dockerenv-500607 stat /var/lib/dpkg/alternatives/iptables
I0904 05:51:51.229283 112991 oci.go:144] the created container "dockerenv-500607" has a running status.
I0904 05:51:51.229312 112991 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa...
I0904 05:51:51.382131 112991 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0904 05:51:51.407993 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:51:51.425348 112991 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0904 05:51:51.425361 112991 kic_runner.go:114] Args: [docker exec --privileged dockerenv-500607 chown docker:docker /home/docker/.ssh/authorized_keys]
I0904 05:51:51.471811 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:51:51.494703 112991 machine.go:93] provisionDockerMachine start ...
I0904 05:51:51.494795 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:51.515440 112991 main.go:141] libmachine: Using SSH client type: native
I0904 05:51:51.515714 112991 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840660] 0x843360 <nil> [] 0s} 127.0.0.1 32774 <nil> <nil>}
I0904 05:51:51.515724 112991 main.go:141] libmachine: About to run SSH command:
hostname
I0904 05:51:51.516553 112991 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57010->127.0.0.1:32774: read: connection reset by peer
I0904 05:51:54.639628 112991 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-500607
I0904 05:51:54.639650 112991 ubuntu.go:182] provisioning hostname "dockerenv-500607"
I0904 05:51:54.639731 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:54.657920 112991 main.go:141] libmachine: Using SSH client type: native
I0904 05:51:54.658224 112991 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840660] 0x843360 <nil> [] 0s} 127.0.0.1 32774 <nil> <nil>}
I0904 05:51:54.658236 112991 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-500607 && echo "dockerenv-500607" | sudo tee /etc/hostname
I0904 05:51:54.787819 112991 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-500607
I0904 05:51:54.787907 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:54.806085 112991 main.go:141] libmachine: Using SSH client type: native
I0904 05:51:54.806286 112991 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840660] 0x843360 <nil> [] 0s} 127.0.0.1 32774 <nil> <nil>}
I0904 05:51:54.806311 112991 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-500607' /etc/hosts; then
    if grep -xq '127.0.1.1\s.*' /etc/hosts; then
        sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-500607/g' /etc/hosts;
    else
        echo '127.0.1.1 dockerenv-500607' | sudo tee -a /etc/hosts;
    fi
fi
I0904 05:51:54.928569 112991 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0904 05:51:54.928613 112991 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21409-85310/.minikube CaCertPath:/home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21409-85310/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21409-85310/.minikube}
I0904 05:51:54.928640 112991 ubuntu.go:190] setting up certificates
I0904 05:51:54.928653 112991 provision.go:84] configureAuth start
I0904 05:51:54.928722 112991 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-500607
I0904 05:51:54.946975 112991 provision.go:143] copyHostCerts
I0904 05:51:54.947066 112991 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-85310/.minikube/ca.pem, removing ...
I0904 05:51:54.947076 112991 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-85310/.minikube/ca.pem
I0904 05:51:54.947151 112991 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21409-85310/.minikube/ca.pem (1078 bytes)
I0904 05:51:54.947282 112991 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-85310/.minikube/cert.pem, removing ...
I0904 05:51:54.947287 112991 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-85310/.minikube/cert.pem
I0904 05:51:54.947320 112991 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21409-85310/.minikube/cert.pem (1123 bytes)
I0904 05:51:54.947402 112991 exec_runner.go:144] found /home/jenkins/minikube-integration/21409-85310/.minikube/key.pem, removing ...
I0904 05:51:54.947406 112991 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21409-85310/.minikube/key.pem
I0904 05:51:54.947434 112991 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21409-85310/.minikube/key.pem (1679 bytes)
I0904 05:51:54.947512 112991 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21409-85310/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca-key.pem org=jenkins.dockerenv-500607 san=[127.0.0.1 192.168.49.2 dockerenv-500607 localhost minikube]
I0904 05:51:55.152125 112991 provision.go:177] copyRemoteCerts
I0904 05:51:55.152178 112991 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0904 05:51:55.152217 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:55.171012 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:51:55.260853 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0904 05:51:55.284233 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0904 05:51:55.308061 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0904 05:51:55.331903 112991 provision.go:87] duration metric: took 403.233359ms to configureAuth
I0904 05:51:55.331928 112991 ubuntu.go:206] setting minikube options for container-runtime
I0904 05:51:55.332121 112991 config.go:182] Loaded profile config "dockerenv-500607": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 05:51:55.332127 112991 machine.go:96] duration metric: took 3.837411517s to provisionDockerMachine
I0904 05:51:55.332133 112991 client.go:171] duration metric: took 9.517748331s to LocalClient.Create
I0904 05:51:55.332153 112991 start.go:167] duration metric: took 9.517803897s to libmachine.API.Create "dockerenv-500607"
I0904 05:51:55.332159 112991 start.go:293] postStartSetup for "dockerenv-500607" (driver="docker")
I0904 05:51:55.332166 112991 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0904 05:51:55.332211 112991 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0904 05:51:55.332243 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:55.351020 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:51:55.441108 112991 ssh_runner.go:195] Run: cat /etc/os-release
I0904 05:51:55.444447 112991 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0904 05:51:55.444465 112991 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0904 05:51:55.444471 112991 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0904 05:51:55.444477 112991 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0904 05:51:55.444486 112991 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-85310/.minikube/addons for local assets ...
I0904 05:51:55.444535 112991 filesync.go:126] Scanning /home/jenkins/minikube-integration/21409-85310/.minikube/files for local assets ...
I0904 05:51:55.444550 112991 start.go:296] duration metric: took 112.387477ms for postStartSetup
I0904 05:51:55.444827 112991 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-500607
I0904 05:51:55.463741 112991 profile.go:143] Saving config to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/config.json ...
I0904 05:51:55.464083 112991 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0904 05:51:55.464120 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:55.481610 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:51:55.565115 112991 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0904 05:51:55.569643 112991 start.go:128] duration metric: took 9.758351435s to createHost
I0904 05:51:55.569665 112991 start.go:83] releasing machines lock for "dockerenv-500607", held for 9.758466846s
I0904 05:51:55.569737 112991 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-500607
I0904 05:51:55.588003 112991 ssh_runner.go:195] Run: cat /version.json
I0904 05:51:55.588021 112991 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0904 05:51:55.588041 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:55.588083 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:51:55.606769 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:51:55.606784 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:51:55.691890 112991 ssh_runner.go:195] Run: systemctl --version
I0904 05:51:55.771707 112991 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0904 05:51:55.776423 112991 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0904 05:51:55.800348 112991 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0904 05:51:55.800413 112991 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0904 05:51:55.826783 112991 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0904 05:51:55.826798 112991 start.go:495] detecting cgroup driver to use...
I0904 05:51:55.826830 112991 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0904 05:51:55.826876 112991 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0904 05:51:55.838804 112991 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0904 05:51:55.850181 112991 docker.go:218] disabling cri-docker service (if available) ...
I0904 05:51:55.850223 112991 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0904 05:51:55.863595 112991 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0904 05:51:55.877130 112991 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0904 05:51:55.951175 112991 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0904 05:51:56.036794 112991 docker.go:234] disabling docker service ...
I0904 05:51:56.036842 112991 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0904 05:51:56.057165 112991 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0904 05:51:56.068792 112991 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0904 05:51:56.145711 112991 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0904 05:51:56.220955 112991 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0904 05:51:56.231885 112991 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0904 05:51:56.247804 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0904 05:51:56.257788 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0904 05:51:56.267808 112991 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0904 05:51:56.267856 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0904 05:51:56.277319 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 05:51:56.286897 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0904 05:51:56.296373 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0904 05:51:56.305784 112991 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0904 05:51:56.314721 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0904 05:51:56.324730 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0904 05:51:56.334557 112991 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0904 05:51:56.344937 112991 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0904 05:51:56.353642 112991 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0904 05:51:56.362487 112991 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 05:51:56.437144 112991 ssh_runner.go:195] Run: sudo systemctl restart containerd
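(The sed edits above should leave /etc/containerd/config.toml with the cgroupfs driver, the pinned pause image, and unprivileged ports enabled once containerd restarts; a hedged spot-check, assuming the same container name and config path as this run:

    docker exec dockerenv-500607 grep -E 'SystemdCgroup|sandbox_image|enable_unprivileged_ports' /etc/containerd/config.toml
    # expected, per the commands above:
    #   sandbox_image = "registry.k8s.io/pause:3.10.1"
    #   SystemdCgroup = false
    #   enable_unprivileged_ports = true
)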
I0904 05:51:56.534417 112991 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0904 05:51:56.534472 112991 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0904 05:51:56.538207 112991 start.go:563] Will wait 60s for crictl version
I0904 05:51:56.538271 112991 ssh_runner.go:195] Run: which crictl
I0904 05:51:56.541787 112991 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0904 05:51:56.574657 112991 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0904 05:51:56.574721 112991 ssh_runner.go:195] Run: containerd --version
I0904 05:51:56.598837 112991 ssh_runner.go:195] Run: containerd --version
I0904 05:51:56.625098 112991 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0904 05:51:56.626463 112991 cli_runner.go:164] Run: docker network inspect dockerenv-500607 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0904 05:51:56.644194 112991 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0904 05:51:56.648132 112991 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 05:51:56.659157 112991 kubeadm.go:875] updating cluster {Name:dockerenv-500607 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-500607 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0904 05:51:56.659260 112991 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0904 05:51:56.659304 112991 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 05:51:56.692303 112991 containerd.go:627] all images are preloaded for containerd runtime.
I0904 05:51:56.692316 112991 containerd.go:534] Images already preloaded, skipping extraction
I0904 05:51:56.692376 112991 ssh_runner.go:195] Run: sudo crictl images --output json
I0904 05:51:56.726618 112991 containerd.go:627] all images are preloaded for containerd runtime.
I0904 05:51:56.726632 112991 cache_images.go:85] Images are preloaded, skipping loading
I0904 05:51:56.726639 112991 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0904 05:51:56.726729 112991 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-500607 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:dockerenv-500607 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0904 05:51:56.726778 112991 ssh_runner.go:195] Run: sudo crictl info
I0904 05:51:56.761871 112991 cni.go:84] Creating CNI manager for ""
I0904 05:51:56.761886 112991 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 05:51:56.761896 112991 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0904 05:51:56.761916 112991 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-500607 NodeName:dockerenv-500607 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0904 05:51:56.762012 112991 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-500607"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
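The generated kubeadm.yaml above stitches four documents together: InitConfiguration (node-local bootstrap settings), ClusterConfiguration (cluster-wide control-plane settings), KubeletConfiguration, and KubeProxyConfiguration. A config like this can be sanity-checked before init; a sketch, assuming the kubeadm binary staged at the path used later in this log and that the `config validate` subcommand is available in this kubeadm release:

  sudo /var/lib/minikube/binaries/v1.34.0/kubeadm config validate \
    --config /var/tmp/minikube/kubeadm.yaml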
I0904 05:51:56.762083 112991 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0904 05:51:56.771037 112991 binaries.go:44] Found k8s binaries, skipping transfer
I0904 05:51:56.771108 112991 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0904 05:51:56.779883 112991 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0904 05:51:56.797184 112991 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0904 05:51:56.815617 112991 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2229 bytes)
I0904 05:51:56.833154 112991 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0904 05:51:56.836850 112991 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0904 05:51:56.847702 112991 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 05:51:56.921395 112991 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 05:51:56.935174 112991 certs.go:68] Setting up /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607 for IP: 192.168.49.2
I0904 05:51:56.935211 112991 certs.go:194] generating shared ca certs ...
I0904 05:51:56.935228 112991 certs.go:226] acquiring lock for ca certs: {Name:mk78adf93da447a40105375c9d184b85c6f2546a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:56.935374 112991 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21409-85310/.minikube/ca.key
I0904 05:51:56.935439 112991 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21409-85310/.minikube/proxy-client-ca.key
I0904 05:51:56.935445 112991 certs.go:256] generating profile certs ...
I0904 05:51:56.935494 112991 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.key
I0904 05:51:56.935503 112991 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.crt with IP's: []
I0904 05:51:57.075333 112991 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.crt ...
I0904 05:51:57.075352 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.crt: {Name:mkae7741600fa3842c32f36fcfd6b4f771dbc465 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.075540 112991 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.key ...
I0904 05:51:57.075547 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/client.key: {Name:mk554d5a960b5899c658f6631f5be9807ff0a110 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.075655 112991 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key.38d5c55e
I0904 05:51:57.075668 112991 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt.38d5c55e with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0904 05:51:57.142805 112991 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt.38d5c55e ...
I0904 05:51:57.142823 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt.38d5c55e: {Name:mk7987680060c4a9cbbc52807eaf09a4d57e5528 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.143036 112991 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key.38d5c55e ...
I0904 05:51:57.143044 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key.38d5c55e: {Name:mkf4fb6f8a81ad2d12d895e25a9ea5434af361ad Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.143181 112991 certs.go:381] copying /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt.38d5c55e -> /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt
I0904 05:51:57.143258 112991 certs.go:385] copying /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key.38d5c55e -> /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key
I0904 05:51:57.143317 112991 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.key
I0904 05:51:57.143331 112991 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.crt with IP's: []
I0904 05:51:57.355021 112991 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.crt ...
I0904 05:51:57.355038 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.crt: {Name:mk603ad20eed9ddb9b0cd5b61d8f04dcc533df15 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.355231 112991 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.key ...
I0904 05:51:57.355239 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.key: {Name:mk1284d574b93a389af41c04ed6836833dd3242f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:51:57.355430 112991 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca-key.pem (1675 bytes)
I0904 05:51:57.355464 112991 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/ca.pem (1078 bytes)
I0904 05:51:57.355485 112991 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/cert.pem (1123 bytes)
I0904 05:51:57.355507 112991 certs.go:484] found cert: /home/jenkins/minikube-integration/21409-85310/.minikube/certs/key.pem (1679 bytes)
I0904 05:51:57.356146 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0904 05:51:57.379806 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0904 05:51:57.402911 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0904 05:51:57.427027 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0904 05:51:57.450348 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0904 05:51:57.474235 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0904 05:51:57.497396 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0904 05:51:57.520073 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/profiles/dockerenv-500607/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0904 05:51:57.543197 112991 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21409-85310/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0904 05:51:57.567080 112991 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0904 05:51:57.584542 112991 ssh_runner.go:195] Run: openssl version
I0904 05:51:57.589974 112991 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0904 05:51:57.599200 112991 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0904 05:51:57.602785 112991 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 4 05:47 /usr/share/ca-certificates/minikubeCA.pem
I0904 05:51:57.602831 112991 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0904 05:51:57.609661 112991 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
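The two commands above implement the OpenSSL trust-store convention: openssl x509 -hash prints the subject-name hash of the CA certificate, and a symlink named <hash>.0 under /etc/ssl/certs (here b5213941.0) is how TLS clients locate the cert at verification time. Generically, the same linking looks like:

  hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"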
I0904 05:51:57.619330 112991 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0904 05:51:57.622786 112991 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0904 05:51:57.622832 112991 kubeadm.go:392] StartCluster: {Name:dockerenv-500607 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1756936034-21409@sha256:06a2e6835062e5beff0e5288aa7d453ae87f4ed9d9f593dbbe436c8e34741bfc Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-500607 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0904 05:51:57.622908 112991 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0904 05:51:57.622958 112991 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0904 05:51:57.657114 112991 cri.go:89] found id: ""
I0904 05:51:57.657191 112991 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0904 05:51:57.666304 112991 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0904 05:51:57.675537 112991 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0904 05:51:57.675617 112991 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0904 05:51:57.684084 112991 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0904 05:51:57.684092 112991 kubeadm.go:157] found existing configuration files:
I0904 05:51:57.684131 112991 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0904 05:51:57.692822 112991 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0904 05:51:57.692873 112991 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0904 05:51:57.701320 112991 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0904 05:51:57.709900 112991 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0904 05:51:57.709950 112991 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0904 05:51:57.718272 112991 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0904 05:51:57.726971 112991 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0904 05:51:57.727025 112991 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0904 05:51:57.735203 112991 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0904 05:51:57.743632 112991 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0904 05:51:57.743687 112991 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0904 05:51:57.752043 112991 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0904 05:51:57.808976 112991 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0904 05:51:57.809251 112991 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1083-gcp\n", err: exit status 1
I0904 05:51:57.867796 112991 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0904 05:52:08.136910 112991 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0904 05:52:08.136976 112991 kubeadm.go:310] [preflight] Running pre-flight checks
I0904 05:52:08.137076 112991 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0904 05:52:08.137150 112991 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1083-gcp
I0904 05:52:08.137190 112991 kubeadm.go:310] OS: Linux
I0904 05:52:08.137257 112991 kubeadm.go:310] CGROUPS_CPU: enabled
I0904 05:52:08.137320 112991 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0904 05:52:08.137377 112991 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0904 05:52:08.137448 112991 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0904 05:52:08.137513 112991 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0904 05:52:08.137577 112991 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0904 05:52:08.137612 112991 kubeadm.go:310] CGROUPS_PIDS: enabled
I0904 05:52:08.137649 112991 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0904 05:52:08.137696 112991 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0904 05:52:08.137753 112991 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0904 05:52:08.137826 112991 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0904 05:52:08.137902 112991 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0904 05:52:08.137974 112991 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0904 05:52:08.139665 112991 out.go:252] - Generating certificates and keys ...
I0904 05:52:08.139735 112991 kubeadm.go:310] [certs] Using existing ca certificate authority
I0904 05:52:08.139791 112991 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0904 05:52:08.139852 112991 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0904 05:52:08.139922 112991 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0904 05:52:08.140026 112991 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0904 05:52:08.140094 112991 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0904 05:52:08.140184 112991 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0904 05:52:08.140279 112991 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-500607 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 05:52:08.140318 112991 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0904 05:52:08.140412 112991 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-500607 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0904 05:52:08.140463 112991 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0904 05:52:08.140512 112991 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0904 05:52:08.140550 112991 kubeadm.go:310] [certs] Generating "sa" key and public key
I0904 05:52:08.140592 112991 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0904 05:52:08.140653 112991 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0904 05:52:08.140727 112991 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0904 05:52:08.140794 112991 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0904 05:52:08.140866 112991 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0904 05:52:08.140942 112991 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0904 05:52:08.141012 112991 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0904 05:52:08.141099 112991 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0904 05:52:08.142754 112991 out.go:252] - Booting up control plane ...
I0904 05:52:08.142824 112991 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0904 05:52:08.142890 112991 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0904 05:52:08.142943 112991 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0904 05:52:08.143047 112991 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0904 05:52:08.143145 112991 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0904 05:52:08.143236 112991 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0904 05:52:08.143304 112991 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0904 05:52:08.143419 112991 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0904 05:52:08.143543 112991 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0904 05:52:08.143648 112991 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0904 05:52:08.143706 112991 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001124371s
I0904 05:52:08.143809 112991 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0904 05:52:08.143883 112991 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0904 05:52:08.143992 112991 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0904 05:52:08.144053 112991 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0904 05:52:08.144111 112991 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 2.427706126s
I0904 05:52:08.144162 112991 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 3.047055101s
I0904 05:52:08.144218 112991 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 4.502499419s
I0904 05:52:08.144305 112991 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0904 05:52:08.144443 112991 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0904 05:52:08.144541 112991 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0904 05:52:08.144732 112991 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-500607 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0904 05:52:08.144779 112991 kubeadm.go:310] [bootstrap-token] Using token: o3ci12.n8n77qq43zbox7wn
I0904 05:52:08.146353 112991 out.go:252] - Configuring RBAC rules ...
I0904 05:52:08.146444 112991 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0904 05:52:08.146515 112991 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0904 05:52:08.146644 112991 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0904 05:52:08.146761 112991 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0904 05:52:08.146860 112991 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0904 05:52:08.146929 112991 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0904 05:52:08.147034 112991 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0904 05:52:08.147085 112991 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0904 05:52:08.147120 112991 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0904 05:52:08.147123 112991 kubeadm.go:310]
I0904 05:52:08.147176 112991 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0904 05:52:08.147190 112991 kubeadm.go:310]
I0904 05:52:08.147263 112991 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0904 05:52:08.147266 112991 kubeadm.go:310]
I0904 05:52:08.147286 112991 kubeadm.go:310] mkdir -p $HOME/.kube
I0904 05:52:08.147346 112991 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0904 05:52:08.147400 112991 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0904 05:52:08.147405 112991 kubeadm.go:310]
I0904 05:52:08.147449 112991 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0904 05:52:08.147451 112991 kubeadm.go:310]
I0904 05:52:08.147489 112991 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0904 05:52:08.147492 112991 kubeadm.go:310]
I0904 05:52:08.147534 112991 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0904 05:52:08.147594 112991 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0904 05:52:08.147671 112991 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0904 05:52:08.147674 112991 kubeadm.go:310]
I0904 05:52:08.147766 112991 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0904 05:52:08.147851 112991 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0904 05:52:08.147859 112991 kubeadm.go:310]
I0904 05:52:08.147991 112991 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token o3ci12.n8n77qq43zbox7wn \
I0904 05:52:08.148147 112991 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3a13dc567f97d0daccfc6a50da453993ce362e062542e574ec530d5fe13b0a44 \
I0904 05:52:08.148176 112991 kubeadm.go:310] --control-plane
I0904 05:52:08.148188 112991 kubeadm.go:310]
I0904 05:52:08.148317 112991 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0904 05:52:08.148322 112991 kubeadm.go:310]
I0904 05:52:08.148409 112991 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token o3ci12.n8n77qq43zbox7wn \
I0904 05:52:08.148527 112991 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3a13dc567f97d0daccfc6a50da453993ce362e062542e574ec530d5fe13b0a44
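The --discovery-token-ca-cert-hash in the join command above is the SHA-256 digest of the DER-encoded public key (SubjectPublicKeyInfo) of the cluster CA; joining nodes use it to pin the control plane they are talking to. It can be recomputed from the CA certificate copied to the node earlier in this log, following the standard kubeadm recipe:

  openssl x509 -pubkey -noout -in /var/lib/minikube/certs/ca.crt \
    | openssl pkey -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | sed 's/^.* //'
  # should print 3a13dc567f97d0daccfc6a50da453993ce362e062542e574ec530d5fe13b0a44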
I0904 05:52:08.148535 112991 cni.go:84] Creating CNI manager for ""
I0904 05:52:08.148541 112991 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0904 05:52:08.150538 112991 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0904 05:52:08.152083 112991 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0904 05:52:08.156070 112991 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0904 05:52:08.156080 112991 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0904 05:52:08.173508 112991 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0904 05:52:08.380982 112991 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0904 05:52:08.381082 112991 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0904 05:52:08.381084 112991 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-500607 minikube.k8s.io/updated_at=2025_09_04T05_52_08_0700 minikube.k8s.io/version=v1.36.0 minikube.k8s.io/commit=c3fa37de45a2901b215fab008201edf72ce5a1ff minikube.k8s.io/name=dockerenv-500607 minikube.k8s.io/primary=true
I0904 05:52:08.388968 112991 ops.go:34] apiserver oom_adj: -16
I0904 05:52:08.475808 112991 kubeadm.go:1105] duration metric: took 94.800341ms to wait for elevateKubeSystemPrivileges
I0904 05:52:08.475866 112991 kubeadm.go:394] duration metric: took 10.853034855s to StartCluster
I0904 05:52:08.475894 112991 settings.go:142] acquiring lock: {Name:mke468da77a7428bb2d33682bbfcc311a6048dbe Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:52:08.475983 112991 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21409-85310/kubeconfig
I0904 05:52:08.476640 112991 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21409-85310/kubeconfig: {Name:mkb3add818be95e6116ddb7fc3e10e683f4b9256 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0904 05:52:08.476835 112991 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0904 05:52:08.476840 112991 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0904 05:52:08.476893 112991 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0904 05:52:08.476971 112991 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-500607"
I0904 05:52:08.476997 112991 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-500607"
I0904 05:52:08.476997 112991 addons.go:69] Setting default-storageclass=true in profile "dockerenv-500607"
I0904 05:52:08.477013 112991 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-500607"
I0904 05:52:08.477033 112991 host.go:66] Checking if "dockerenv-500607" exists ...
I0904 05:52:08.477047 112991 config.go:182] Loaded profile config "dockerenv-500607": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0904 05:52:08.477424 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:52:08.477569 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:52:08.478366 112991 out.go:179] * Verifying Kubernetes components...
I0904 05:52:08.479800 112991 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0904 05:52:08.502660 112991 addons.go:238] Setting addon default-storageclass=true in "dockerenv-500607"
I0904 05:52:08.502693 112991 host.go:66] Checking if "dockerenv-500607" exists ...
I0904 05:52:08.503108 112991 cli_runner.go:164] Run: docker container inspect dockerenv-500607 --format={{.State.Status}}
I0904 05:52:08.508867 112991 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0904 05:52:08.510198 112991 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0904 05:52:08.510209 112991 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0904 05:52:08.510257 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:52:08.519530 112991 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0904 05:52:08.519543 112991 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0904 05:52:08.519588 112991 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-500607
I0904 05:52:08.526669 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:52:08.536841 112991 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32774 SSHKeyPath:/home/jenkins/minikube-integration/21409-85310/.minikube/machines/dockerenv-500607/id_rsa Username:docker}
I0904 05:52:08.763763 112991 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0904 05:52:08.764638 112991 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
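The sed pipeline above patches the CoreDNS Corefile in place: it inserts a hosts block ahead of the `forward . /etc/resolv.conf` directive so that host.minikube.internal resolves from inside the cluster, and adds a log directive after errors. The injected block is:

  hosts {
     192.168.49.1 host.minikube.internal
     fallthrough
  }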
I0904 05:52:08.767863 112991 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0904 05:52:08.778238 112991 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0904 05:52:09.162572 112991 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0904 05:52:09.297940 112991 api_server.go:52] waiting for apiserver process to appear ...
I0904 05:52:09.297991 112991 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0904 05:52:09.298543 112991 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I0904 05:52:09.299865 112991 addons.go:514] duration metric: took 822.963935ms for enable addons: enabled=[default-storageclass storage-provisioner]
I0904 05:52:09.308617 112991 api_server.go:72] duration metric: took 831.733489ms to wait for apiserver process to appear ...
I0904 05:52:09.308637 112991 api_server.go:88] waiting for apiserver healthz status ...
I0904 05:52:09.308657 112991 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0904 05:52:09.314675 112991 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0904 05:52:09.315650 112991 api_server.go:141] control plane version: v1.34.0
I0904 05:52:09.315666 112991 api_server.go:131] duration metric: took 7.022736ms to wait for apiserver health ...
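The healthz wait is a plain GET against the apiserver; anything other than an HTTP 200 with body "ok" keeps the loop polling. The equivalent check by hand from the same host (-k because the serving cert is signed by the minikubeCA generated above, which the host does not trust by default):

  curl -sk https://192.168.49.2:8443/healthz
  # ok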
I0904 05:52:09.315674 112991 system_pods.go:43] waiting for kube-system pods to appear ...
I0904 05:52:09.318500 112991 system_pods.go:59] 5 kube-system pods found
I0904 05:52:09.318517 112991 system_pods.go:61] "etcd-dockerenv-500607" [458a62eb-b1ad-4ff1-a3ac-14904ed91992] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0904 05:52:09.318523 112991 system_pods.go:61] "kube-apiserver-dockerenv-500607" [d6ae279a-2657-4d72-a1cd-46466d7264d1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0904 05:52:09.318529 112991 system_pods.go:61] "kube-controller-manager-dockerenv-500607" [611634f6-56d7-4a7e-8c7f-beac344fbbd2] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0904 05:52:09.318535 112991 system_pods.go:61] "kube-scheduler-dockerenv-500607" [cc022fe4-b397-4e44-8c90-78ab642f42fd] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0904 05:52:09.318538 112991 system_pods.go:61] "storage-provisioner" [5ae9695b-4e29-460b-831e-3fa2de191f37] Pending
I0904 05:52:09.318544 112991 system_pods.go:74] duration metric: took 2.864912ms to wait for pod list to return data ...
I0904 05:52:09.318553 112991 kubeadm.go:578] duration metric: took 841.695432ms to wait for: map[apiserver:true system_pods:true]
I0904 05:52:09.318563 112991 node_conditions.go:102] verifying NodePressure condition ...
I0904 05:52:09.320822 112991 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0904 05:52:09.320835 112991 node_conditions.go:123] node cpu capacity is 8
I0904 05:52:09.320847 112991 node_conditions.go:105] duration metric: took 2.281009ms to run NodePressure ...
I0904 05:52:09.320860 112991 start.go:241] waiting for startup goroutines ...
I0904 05:52:09.666688 112991 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-500607" context rescaled to 1 replicas
I0904 05:52:09.666721 112991 start.go:246] waiting for cluster config update ...
I0904 05:52:09.666733 112991 start.go:255] writing updated cluster config ...
I0904 05:52:09.667094 112991 ssh_runner.go:195] Run: rm -f paused
I0904 05:52:09.715661 112991 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
I0904 05:52:09.717911 112991 out.go:179] * Done! kubectl is now configured to use "dockerenv-500607" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
eb0314a187929 409467f978b4a 10 seconds ago Running kindnet-cni 0 0c96b3095a45f kindnet-xt82b
be687ae1589d5 df0860106674d 10 seconds ago Running kube-proxy 0 7b6fa32cd6da1 kube-proxy-pss8w
ea752063eee8c 6e38f40d628db 11 seconds ago Running storage-provisioner 0 2da10f2320210 storage-provisioner
681f881eddf53 5f1f5298c888d 21 seconds ago Running etcd 0 a154f225300bb etcd-dockerenv-500607
3d26f2f6ad1d3 90550c43ad2bc 21 seconds ago Running kube-apiserver 0 9a0d3542d496d kube-apiserver-dockerenv-500607
3960958fa870b 46169d968e920 21 seconds ago Running kube-scheduler 0 c6576b976653e kube-scheduler-dockerenv-500607
7fdab663d12bc a0af72f2ec6d6 21 seconds ago Running kube-controller-manager 0 53e53da1f0122 kube-controller-manager-dockerenv-500607
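The table above is crictl's view from inside the node: one row per CRI container, each tied to a pod sandbox ID. The same listing can be reproduced against a live profile, assuming the minikube binary used in this run:

  out/minikube-linux-amd64 -p dockerenv-500607 ssh -- sudo crictl ps -a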
==> containerd <==
Sep 04 05:52:02 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:02.959572691Z" level=info msg="StartContainer for \"681f881eddf531c785ef39b6bb3db8880c463b93fbe265db98f7d8afc822bd7f\" returns successfully"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.172765334Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:5ae9695b-4e29-460b-831e-3fa2de191f37,Namespace:kube-system,Attempt:0,}"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.257648046Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:5ae9695b-4e29-460b-831e-3fa2de191f37,Namespace:kube-system,Attempt:0,} returns sandbox id \"2da10f2320210a5f7a27f24edec268c19fedf0c1c515fc5625b9a446157b1eeb\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.263511885Z" level=info msg="CreateContainer within sandbox \"2da10f2320210a5f7a27f24edec268c19fedf0c1c515fc5625b9a446157b1eeb\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.274804108Z" level=info msg="CreateContainer within sandbox \"2da10f2320210a5f7a27f24edec268c19fedf0c1c515fc5625b9a446157b1eeb\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"ea752063eee8c49a29e2cafba9ad5199630ca12e1eae932a037a1eee651ba44c\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.275589663Z" level=info msg="StartContainer for \"ea752063eee8c49a29e2cafba9ad5199630ca12e1eae932a037a1eee651ba44c\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.370450204Z" level=info msg="StartContainer for \"ea752063eee8c49a29e2cafba9ad5199630ca12e1eae932a037a1eee651ba44c\" returns successfully"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.595723746Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-pss8w,Uid:27fb586f-4e70-46e8-a054-a3363929d076,Namespace:kube-system,Attempt:0,}"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.644335524Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-pss8w,Uid:27fb586f-4e70-46e8-a054-a3363929d076,Namespace:kube-system,Attempt:0,} returns sandbox id \"7b6fa32cd6da1e1e04dcea754e90013f2e8bd97c212913df3eac60e5c2da56d3\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.649748314Z" level=info msg="CreateContainer within sandbox \"7b6fa32cd6da1e1e04dcea754e90013f2e8bd97c212913df3eac60e5c2da56d3\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.662134287Z" level=info msg="CreateContainer within sandbox \"7b6fa32cd6da1e1e04dcea754e90013f2e8bd97c212913df3eac60e5c2da56d3\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"be687ae1589d5264e8fae9159758d7d11f69f499b749fce7b2584bc30466c89f\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.662767196Z" level=info msg="StartContainer for \"be687ae1589d5264e8fae9159758d7d11f69f499b749fce7b2584bc30466c89f\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.684892308Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-rgn25,Uid:c953c737-ee46-447d-af77-4d2ef18a0d38,Namespace:kube-system,Attempt:0,}"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.705907697Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-rgn25,Uid:c953c737-ee46-447d-af77-4d2ef18a0d38,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\": failed to find network info for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\""
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.717753834Z" level=info msg="StartContainer for \"be687ae1589d5264e8fae9159758d7d11f69f499b749fce7b2584bc30466c89f\" returns successfully"
Sep 04 05:52:13 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:13.893288414Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-xt82b,Uid:12959476-9605-49bc-b796-48b671af42d3,Namespace:kube-system,Attempt:0,}"
Sep 04 05:52:14 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:14.158436731Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-xt82b,Uid:12959476-9605-49bc-b796-48b671af42d3,Namespace:kube-system,Attempt:0,} returns sandbox id \"0c96b3095a45fe12e9a5393b108d37fb0367510a32f29c529012a1d4a334e3fc\""
Sep 04 05:52:14 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:14.164060799Z" level=info msg="CreateContainer within sandbox \"0c96b3095a45fe12e9a5393b108d37fb0367510a32f29c529012a1d4a334e3fc\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Sep 04 05:52:14 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:14.175393262Z" level=info msg="CreateContainer within sandbox \"0c96b3095a45fe12e9a5393b108d37fb0367510a32f29c529012a1d4a334e3fc\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"eb0314a18792945e30f40b024f0e1ea43542cdf14fdbb4962fbbf7b0baafc700\""
Sep 04 05:52:14 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:14.176058091Z" level=info msg="StartContainer for \"eb0314a18792945e30f40b024f0e1ea43542cdf14fdbb4962fbbf7b0baafc700\""
Sep 04 05:52:14 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:14.272260447Z" level=info msg="StartContainer for \"eb0314a18792945e30f40b024f0e1ea43542cdf14fdbb4962fbbf7b0baafc700\" returns successfully"
Sep 04 05:52:17 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:17.958641824Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
Sep 04 05:52:22 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:22.564845663Z" level=info msg="ImageCreate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\""
Sep 04 05:52:22 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:22.572512158Z" level=info msg="ImageCreate event name:\"sha256:b5071690d691e592d1838713d34f6e17359e609f6f72854cb670728c823ff7a7\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 04 05:52:22 dockerenv-500607 containerd[876]: time="2025-09-04T05:52:22.572949356Z" level=info msg="ImageUpdate event name:\"docker.io/local/minikube-dockerenv-containerd-test:latest\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
==> describe nodes <==
Name: dockerenv-500607
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=dockerenv-500607
kubernetes.io/os=linux
minikube.k8s.io/commit=c3fa37de45a2901b215fab008201edf72ce5a1ff
minikube.k8s.io/name=dockerenv-500607
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_04T05_52_08_0700
minikube.k8s.io/version=v1.36.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 04 Sep 2025 05:52:05 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-500607
AcquireTime: <unset>
RenewTime: Thu, 04 Sep 2025 05:52:17 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 04 Sep 2025 05:52:17 +0000 Thu, 04 Sep 2025 05:52:03 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 04 Sep 2025 05:52:17 +0000 Thu, 04 Sep 2025 05:52:03 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 04 Sep 2025 05:52:17 +0000 Thu, 04 Sep 2025 05:52:03 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 04 Sep 2025 05:52:17 +0000 Thu, 04 Sep 2025 05:52:06 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-500607
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859364Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859364Ki
pods: 110
System Info:
Machine ID: 2cd4816a0ff744a880a9904a875e1531
System UUID: d8db3b2c-1156-4103-9f46-159115c83970
Boot ID: 183da3a6-5334-4764-8aa8-37d003fbc77e
Kernel Version: 5.15.0-1083-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-rgn25 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 11s
kube-system etcd-dockerenv-500607 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 18s
kube-system kindnet-xt82b 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 11s
kube-system kube-apiserver-dockerenv-500607 250m (3%) 0 (0%) 0 (0%) 0 (0%) 18s
kube-system kube-controller-manager-dockerenv-500607 200m (2%) 0 (0%) 0 (0%) 0 (0%) 18s
kube-system kube-proxy-pss8w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system kube-scheduler-dockerenv-500607 100m (1%) 0 (0%) 0 (0%) 0 (0%) 18s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 10s kube-proxy
Normal NodeAllocatableEnforced 22s kubelet Updated Node Allocatable limit across pods
Warning CgroupV1 22s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 22s (x8 over 22s) kubelet Node dockerenv-500607 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 22s (x8 over 22s) kubelet Node dockerenv-500607 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 22s (x7 over 22s) kubelet Node dockerenv-500607 status is now: NodeHasSufficientPID
Normal Starting 22s kubelet Starting kubelet.
Normal Starting 17s kubelet Starting kubelet.
Warning CgroupV1 17s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 17s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 17s kubelet Node dockerenv-500607 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 17s kubelet Node dockerenv-500607 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 17s kubelet Node dockerenv-500607 status is now: NodeHasSufficientPID
Normal RegisteredNode 12s node-controller Node dockerenv-500607 event: Registered Node dockerenv-500607 in Controller
==> dmesg <==
[ +4.031704] net_ratelimit: 2 callbacks suppressed
[ +0.000005] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000006] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000009] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000002] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000004] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000004] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000002] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000004] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000004] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000002] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +8.191204] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000006] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000007] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000002] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
[ +0.000002] IPv4: martian source 10.96.0.1 from 10.244.0.4, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 90 b7 64 08 00
==> etcd [681f881eddf531c785ef39b6bb3db8880c463b93fbe265db98f7d8afc822bd7f] <==
{"level":"warn","ts":"2025-09-04T05:52:04.256615Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53852","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.267004Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53886","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.274308Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53900","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.282387Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53930","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.288771Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53942","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.294887Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53962","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.352737Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53978","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.360804Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:53996","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.368599Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54002","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.404193Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54012","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.410855Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54038","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.444409Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54058","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.451151Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54082","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.457366Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54110","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.464809Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54122","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.473351Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54150","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.479443Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54174","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.485411Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54188","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.492357Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54194","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.498873Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54206","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.504739Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54228","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.547072Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54248","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.553375Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54268","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.560051Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54292","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-04T05:52:04.649806Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:54318","server-name":"","error":"EOF"}
==> kernel <==
05:52:24 up 2:34, 0 users, load average: 1.28, 1.18, 1.12
Linux dockerenv-500607 5.15.0-1083-gcp #92~20.04.1-Ubuntu SMP Tue Apr 29 09:12:55 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [eb0314a18792945e30f40b024f0e1ea43542cdf14fdbb4962fbbf7b0baafc700] <==
I0904 05:52:14.447998 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0904 05:52:14.448274 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0904 05:52:14.448431 1 main.go:148] setting mtu 1500 for CNI
I0904 05:52:14.448452 1 main.go:178] kindnetd IP family: "ipv4"
I0904 05:52:14.448482 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-09-04T05:52:14Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I0904 05:52:14.746974 1 controller.go:377] "Starting controller" name="kube-network-policies"
I0904 05:52:14.747032 1 controller.go:381] "Waiting for informer caches to sync"
I0904 05:52:14.747049 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I0904 05:52:14.747695 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I0904 05:52:15.144099 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I0904 05:52:15.144188 1 metrics.go:72] Registering metrics
I0904 05:52:15.144608 1 controller.go:711] "Syncing nftables rules"
==> kube-apiserver [3d26f2f6ad1d3d72228bcb5b3a6d05ed1072f129fdac9eafb1e4cce3e64ac5c2] <==
I0904 05:52:05.248948 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12]
I0904 05:52:05.249779 1 controller.go:667] quota admission added evaluator for: namespaces
I0904 05:52:05.252416 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 05:52:05.252432 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I0904 05:52:05.260517 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 05:52:05.260858 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I0904 05:52:05.261108 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I0904 05:52:05.264940 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0904 05:52:06.092643 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0904 05:52:06.096592 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0904 05:52:06.096610 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0904 05:52:06.564760 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0904 05:52:06.601679 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0904 05:52:06.659409 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0904 05:52:06.664896 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0904 05:52:06.665990 1 controller.go:667] quota admission added evaluator for: endpoints
I0904 05:52:06.670409 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0904 05:52:07.164579 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0904 05:52:07.555426 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0904 05:52:07.568122 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0904 05:52:07.577728 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0904 05:52:12.318514 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 05:52:12.322072 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0904 05:52:13.166839 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0904 05:52:13.268144 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [7fdab663d12bc6e3136d5f6e3cf7b1a28ecb43340a234e53e93f29e1752741df] <==
I0904 05:52:12.162495 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0904 05:52:12.162501 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I0904 05:52:12.162744 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I0904 05:52:12.162913 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I0904 05:52:12.163599 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrapproving"
I0904 05:52:12.164138 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I0904 05:52:12.164235 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I0904 05:52:12.164657 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0904 05:52:12.164697 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0904 05:52:12.164704 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0904 05:52:12.164795 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I0904 05:52:12.164824 1 shared_informer.go:356] "Caches are synced" controller="job"
I0904 05:52:12.164887 1 shared_informer.go:356] "Caches are synced" controller="GC"
I0904 05:52:12.164905 1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
I0904 05:52:12.164928 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0904 05:52:12.165005 1 shared_informer.go:356] "Caches are synced" controller="expand"
I0904 05:52:12.165338 1 shared_informer.go:356] "Caches are synced" controller="service account"
I0904 05:52:12.165437 1 shared_informer.go:356] "Caches are synced" controller="daemon sets"
I0904 05:52:12.165442 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0904 05:52:12.166087 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I0904 05:52:12.166948 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I0904 05:52:12.168365 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0904 05:52:12.168401 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
I0904 05:52:12.176629 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I0904 05:52:12.177826 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
==> kube-proxy [be687ae1589d5264e8fae9159758d7d11f69f499b749fce7b2584bc30466c89f] <==
I0904 05:52:13.748358 1 server_linux.go:53] "Using iptables proxy"
I0904 05:52:13.873649 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0904 05:52:13.973813 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0904 05:52:13.973850 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0904 05:52:13.973923 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0904 05:52:13.992927 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0904 05:52:13.992986 1 server_linux.go:132] "Using iptables Proxier"
I0904 05:52:13.997117 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0904 05:52:13.997493 1 server.go:527] "Version info" version="v1.34.0"
I0904 05:52:13.997509 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0904 05:52:13.999412 1 config.go:200] "Starting service config controller"
I0904 05:52:13.999441 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0904 05:52:13.999513 1 config.go:106] "Starting endpoint slice config controller"
I0904 05:52:13.999521 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0904 05:52:13.999545 1 config.go:403] "Starting serviceCIDR config controller"
I0904 05:52:13.999551 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0904 05:52:13.999800 1 config.go:309] "Starting node config controller"
I0904 05:52:14.000031 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0904 05:52:14.100220 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0904 05:52:14.100257 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0904 05:52:14.100250 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0904 05:52:14.100282 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-scheduler [3960958fa870ba09887fe48f50daae85ef2a0d0c100ff1bf85c77e35f4d8c26a] <==
E0904 05:52:05.266753 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E0904 05:52:05.268409 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0904 05:52:05.268634 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0904 05:52:05.269243 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E0904 05:52:05.269303 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0904 05:52:05.269359 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0904 05:52:05.269413 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0904 05:52:05.269452 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0904 05:52:05.269513 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0904 05:52:05.269600 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0904 05:52:05.269651 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0904 05:52:05.269720 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0904 05:52:05.269756 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0904 05:52:05.269790 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0904 05:52:05.269837 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0904 05:52:05.269872 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0904 05:52:05.269910 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0904 05:52:05.269921 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0904 05:52:06.294617 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0904 05:52:06.303848 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0904 05:52:06.344997 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0904 05:52:06.353203 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0904 05:52:06.385503 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0904 05:52:06.403687 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
I0904 05:52:06.966319 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Sep 04 05:52:12 dockerenv-500607 kubelet[1637]: I0904 05:52:12.376744 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpkkz\" (UniqueName: \"kubernetes.io/projected/5ae9695b-4e29-460b-831e-3fa2de191f37-kube-api-access-bpkkz\") pod \"storage-provisioner\" (UID: \"5ae9695b-4e29-460b-831e-3fa2de191f37\") " pod="kube-system/storage-provisioner"
Sep 04 05:52:12 dockerenv-500607 kubelet[1637]: I0904 05:52:12.376814 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/5ae9695b-4e29-460b-831e-3fa2de191f37-tmp\") pod \"storage-provisioner\" (UID: \"5ae9695b-4e29-460b-831e-3fa2de191f37\") " pod="kube-system/storage-provisioner"
Sep 04 05:52:12 dockerenv-500607 kubelet[1637]: E0904 05:52:12.483792 1637 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 04 05:52:12 dockerenv-500607 kubelet[1637]: E0904 05:52:12.483832 1637 projected.go:196] Error preparing data for projected volume kube-api-access-bpkkz for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 04 05:52:12 dockerenv-500607 kubelet[1637]: E0904 05:52:12.483971 1637 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5ae9695b-4e29-460b-831e-3fa2de191f37-kube-api-access-bpkkz podName:5ae9695b-4e29-460b-831e-3fa2de191f37 nodeName:}" failed. No retries permitted until 2025-09-04 05:52:12.983925562 +0000 UTC m=+5.636891903 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-bpkkz" (UniqueName: "kubernetes.io/projected/5ae9695b-4e29-460b-831e-3fa2de191f37-kube-api-access-bpkkz") pod "storage-provisioner" (UID: "5ae9695b-4e29-460b-831e-3fa2de191f37") : configmap "kube-root-ca.crt" not found
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.081423 1637 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.482799 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/27fb586f-4e70-46e8-a054-a3363929d076-kube-proxy\") pod \"kube-proxy-pss8w\" (UID: \"27fb586f-4e70-46e8-a054-a3363929d076\") " pod="kube-system/kube-proxy-pss8w"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.482849 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q64m9\" (UniqueName: \"kubernetes.io/projected/27fb586f-4e70-46e8-a054-a3363929d076-kube-api-access-q64m9\") pod \"kube-proxy-pss8w\" (UID: \"27fb586f-4e70-46e8-a054-a3363929d076\") " pod="kube-system/kube-proxy-pss8w"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.482883 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c953c737-ee46-447d-af77-4d2ef18a0d38-config-volume\") pod \"coredns-66bc5c9577-rgn25\" (UID: \"c953c737-ee46-447d-af77-4d2ef18a0d38\") " pod="kube-system/coredns-66bc5c9577-rgn25"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.482933 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/12959476-9605-49bc-b796-48b671af42d3-xtables-lock\") pod \"kindnet-xt82b\" (UID: \"12959476-9605-49bc-b796-48b671af42d3\") " pod="kube-system/kindnet-xt82b"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.482957 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/12959476-9605-49bc-b796-48b671af42d3-lib-modules\") pod \"kindnet-xt82b\" (UID: \"12959476-9605-49bc-b796-48b671af42d3\") " pod="kube-system/kindnet-xt82b"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.483065 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/27fb586f-4e70-46e8-a054-a3363929d076-xtables-lock\") pod \"kube-proxy-pss8w\" (UID: \"27fb586f-4e70-46e8-a054-a3363929d076\") " pod="kube-system/kube-proxy-pss8w"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.483103 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/27fb586f-4e70-46e8-a054-a3363929d076-lib-modules\") pod \"kube-proxy-pss8w\" (UID: \"27fb586f-4e70-46e8-a054-a3363929d076\") " pod="kube-system/kube-proxy-pss8w"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.483122 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsjts\" (UniqueName: \"kubernetes.io/projected/c953c737-ee46-447d-af77-4d2ef18a0d38-kube-api-access-lsjts\") pod \"coredns-66bc5c9577-rgn25\" (UID: \"c953c737-ee46-447d-af77-4d2ef18a0d38\") " pod="kube-system/coredns-66bc5c9577-rgn25"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.483144 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/12959476-9605-49bc-b796-48b671af42d3-cni-cfg\") pod \"kindnet-xt82b\" (UID: \"12959476-9605-49bc-b796-48b671af42d3\") " pod="kube-system/kindnet-xt82b"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.483217 1637 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fknms\" (UniqueName: \"kubernetes.io/projected/12959476-9605-49bc-b796-48b671af42d3-kube-api-access-fknms\") pod \"kindnet-xt82b\" (UID: \"12959476-9605-49bc-b796-48b671af42d3\") " pod="kube-system/kindnet-xt82b"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: I0904 05:52:13.491476 1637 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=4.491400506 podStartE2EDuration="4.491400506s" podCreationTimestamp="2025-09-04 05:52:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 05:52:13.49132649 +0000 UTC m=+6.144292840" watchObservedRunningTime="2025-09-04 05:52:13.491400506 +0000 UTC m=+6.144366855"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: E0904 05:52:13.706253 1637 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\": failed to find network info for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\""
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: E0904 05:52:13.706347 1637 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\": failed to find network info for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\"" pod="kube-system/coredns-66bc5c9577-rgn25"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: E0904 05:52:13.706376 1637 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\": failed to find network info for sandbox \"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\"" pod="kube-system/coredns-66bc5c9577-rgn25"
Sep 04 05:52:13 dockerenv-500607 kubelet[1637]: E0904 05:52:13.706459 1637 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-rgn25_kube-system(c953c737-ee46-447d-af77-4d2ef18a0d38)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-rgn25_kube-system(c953c737-ee46-447d-af77-4d2ef18a0d38)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\\\": failed to find network info for sandbox \\\"be6a53f3facb45f08532d30750c6817be0a045e95b2ebdcb54edb8ee985f074a\\\"\"" pod="kube-system/coredns-66bc5c9577-rgn25" podUID="c953c737-ee46-447d-af77-4d2ef18a0d38"
Sep 04 05:52:14 dockerenv-500607 kubelet[1637]: I0904 05:52:14.495179 1637 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-xt82b" podStartSLOduration=1.495160585 podStartE2EDuration="1.495160585s" podCreationTimestamp="2025-09-04 05:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 05:52:14.495055455 +0000 UTC m=+7.148021804" watchObservedRunningTime="2025-09-04 05:52:14.495160585 +0000 UTC m=+7.148126934"
Sep 04 05:52:15 dockerenv-500607 kubelet[1637]: I0904 05:52:15.607956 1637 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-pss8w" podStartSLOduration=2.607912724 podStartE2EDuration="2.607912724s" podCreationTimestamp="2025-09-04 05:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-04 05:52:14.505563638 +0000 UTC m=+7.158529986" watchObservedRunningTime="2025-09-04 05:52:15.607912724 +0000 UTC m=+8.260879074"
Sep 04 05:52:17 dockerenv-500607 kubelet[1637]: I0904 05:52:17.957879 1637 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 04 05:52:17 dockerenv-500607 kubelet[1637]: I0904 05:52:17.958994 1637 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
==> storage-provisioner [ea752063eee8c49a29e2cafba9ad5199630ca12e1eae932a037a1eee651ba44c] <==
I0904 05:52:13.379561 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-500607 -n dockerenv-500607
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-500607 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-rgn25
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-500607 describe pod coredns-66bc5c9577-rgn25
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-500607 describe pod coredns-66bc5c9577-rgn25: exit status 1 (63.233261ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-rgn25" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-500607 describe pod coredns-66bc5c9577-rgn25: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-500607" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-500607
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-500607: (1.847966957s)
--- FAIL: TestDockerEnvContainerd (41.46s)