=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-230733 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-230733 --driver=docker --container-runtime=containerd: (24.441372453s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-230733"
docker_test.go:189: (dbg) Done: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-230733": (1.001438494s)
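The eval-able output of that docker-env command amounts to export statements like the following (a sketch; the three values are the same ones that appear verbatim in the commands below):

    export SSH_AUTH_SOCK="/tmp/ssh-XXXXXXxKMnhD/agent.1126955"
    export SSH_AGENT_PID="1126956"
    export DOCKER_HOST="ssh://docker@127.0.0.1:33266"
    # to adopt them in a local shell:
    # eval "$(out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-230733)"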
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXxKMnhD/agent.1126955" SSH_AGENT_PID="1126956" DOCKER_HOST=ssh://docker@127.0.0.1:33266 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXxKMnhD/agent.1126955" SSH_AGENT_PID="1126956" DOCKER_HOST=ssh://docker@127.0.0.1:33266 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXxKMnhD/agent.1126955" SSH_AGENT_PID="1126956" DOCKER_HOST=ssh://docker@127.0.0.1:33266 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": exit status 1 (2.660878401s)
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:245: failed to build images, error: exit status 1, output:
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXxKMnhD/agent.1126955" SSH_AGENT_PID="1126956" DOCKER_HOST=ssh://docker@127.0.0.1:33266 docker image ls"
docker_test.go:255: failed to detect image 'local/minikube-dockerenv-containerd-test' in output of docker image ls
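The failing step can be reproduced by hand against the same SSH-tunnelled daemon; a minimal repro sketch using the exact values from this run:

    export SSH_AUTH_SOCK=/tmp/ssh-XXXXXXxKMnhD/agent.1126955
    export SSH_AGENT_PID=1126956
    export DOCKER_HOST=ssh://docker@127.0.0.1:33266
    # legacy (non-BuildKit) build, exactly as the test runs it:
    DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env
    # the assertion that fails above: the freshly built tag must show up here
    docker image ls | grep minikube-dockerenv-containerd-test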
panic.go:636: *** TestDockerEnvContainerd FAILED at 2025-09-29 12:23:15.725398051 +0000 UTC m=+360.309421233
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-230733
helpers_test.go:243: (dbg) docker inspect dockerenv-230733:
-- stdout --
[
{
"Id": "fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1",
"Created": "2025-09-29T12:22:41.889254107Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1124164,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-29T12:22:41.918526474Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1/hostname",
"HostsPath": "/var/lib/docker/containers/fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1/hosts",
"LogPath": "/var/lib/docker/containers/fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1/fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1-json.log",
"Name": "/dockerenv-230733",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"dockerenv-230733:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "dockerenv-230733",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 8388608000,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 16777216000,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "fdbed458c3205a89816fc5019259340c9514ee015fe5d4b5ac47ed88d80e96e1",
"LowerDir": "/var/lib/docker/overlay2/469fcfe166795b34f0d839d5502c40a9e6df8dd54c063d915a5f2db53c57d85d-init/diff:/var/lib/docker/overlay2/fbd0ff8837aea1062458ef3b6c2ff01f7caaf77470820d108a1f7ca188c98aa7/diff",
"MergedDir": "/var/lib/docker/overlay2/469fcfe166795b34f0d839d5502c40a9e6df8dd54c063d915a5f2db53c57d85d/merged",
"UpperDir": "/var/lib/docker/overlay2/469fcfe166795b34f0d839d5502c40a9e6df8dd54c063d915a5f2db53c57d85d/diff",
"WorkDir": "/var/lib/docker/overlay2/469fcfe166795b34f0d839d5502c40a9e6df8dd54c063d915a5f2db53c57d85d/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "dockerenv-230733",
"Source": "/var/lib/docker/volumes/dockerenv-230733/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "dockerenv-230733",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "dockerenv-230733",
"name.minikube.sigs.k8s.io": "dockerenv-230733",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "95b3751e2cb3350484956697dc1e843d8d24259005add54c44d83f9e43d1180a",
"SandboxKey": "/var/run/docker/netns/95b3751e2cb3",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33266"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33267"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33270"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33268"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33269"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"dockerenv-230733": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "be:73:3c:bc:bb:f0",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "3082c50a22240c6b19257a968d8b2dd3c5ccf5e3f6ae3bcc81951394c9958e8c",
"EndpointID": "532da024af43db8232fa84317d1dd6ac1732bc485b25ce672de22a577cfe3549",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"dockerenv-230733",
"fdbed458c320"
]
}
}
}
}
]
-- /stdout --
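The ssh://docker@127.0.0.1:33266 endpoint used throughout the test comes from the "22/tcp" entry under NetworkSettings.Ports above; it can be read back with the same Go template the harness itself uses later in this log:

    docker container inspect dockerenv-230733 \
      -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'
    # -> 33266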
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-230733 -n dockerenv-230733
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-230733 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p dockerenv-230733 logs -n 25: (1.13434514s)
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ addons │ addons-752861 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable headlamp --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ ssh │ addons-752861 ssh cat /opt/local-path-provisioner/pvc-ffea6f18-99c9-41ff-a6ec-8000ba91e3a7_default_test-pvc/file1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ ip │ addons-752861 ip │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable registry --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable registry-creds --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable amd-gpu-device-plugin --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ ssh │ addons-752861 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ ip │ addons-752861 ip │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable ingress-dns --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable yakd --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable ingress --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:21 UTC │ 29 Sep 25 12:21 UTC │
│ addons │ addons-752861 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ addons │ addons-752861 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ stop │ -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ addons │ enable dashboard -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ addons │ disable dashboard -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ addons │ disable gvisor -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ delete │ -p addons-752861 │ addons-752861 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:22 UTC │
│ start │ -p dockerenv-230733 --driver=docker --container-runtime=containerd │ dockerenv-230733 │ jenkins │ v1.37.0 │ 29 Sep 25 12:22 UTC │ 29 Sep 25 12:23 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-230733 │ dockerenv-230733 │ jenkins │ v1.37.0 │ 29 Sep 25 12:23 UTC │ 29 Sep 25 12:23 UTC │
└────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/29 12:22:36
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0929 12:22:36.693841 1123601 out.go:360] Setting OutFile to fd 1 ...
I0929 12:22:36.693930 1123601 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0929 12:22:36.693933 1123601 out.go:374] Setting ErrFile to fd 2...
I0929 12:22:36.693936 1123601 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0929 12:22:36.694138 1123601 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21652-1097891/.minikube/bin
I0929 12:22:36.694640 1123601 out.go:368] Setting JSON to false
I0929 12:22:36.695618 1123601 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":18294,"bootTime":1759130263,"procs":186,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1040-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0929 12:22:36.695702 1123601 start.go:140] virtualization: kvm guest
I0929 12:22:36.697696 1123601 out.go:179] * [dockerenv-230733] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0929 12:22:36.698740 1123601 out.go:179] - MINIKUBE_LOCATION=21652
I0929 12:22:36.698788 1123601 notify.go:220] Checking for updates...
I0929 12:22:36.700874 1123601 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0929 12:22:36.701924 1123601 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21652-1097891/kubeconfig
I0929 12:22:36.702906 1123601 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21652-1097891/.minikube
I0929 12:22:36.704018 1123601 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0929 12:22:36.705089 1123601 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0929 12:22:36.706309 1123601 driver.go:421] Setting default libvirt URI to qemu:///system
I0929 12:22:36.731173 1123601 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0929 12:22:36.731267 1123601 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0929 12:22:36.787368 1123601 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-09-29 12:22:36.777198299 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1040-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v0.1.40] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0929 12:22:36.787483 1123601 docker.go:318] overlay module found
I0929 12:22:36.789442 1123601 out.go:179] * Using the docker driver based on user configuration
I0929 12:22:36.790403 1123601 start.go:304] selected driver: docker
I0929 12:22:36.790412 1123601 start.go:924] validating driver "docker" against <nil>
I0929 12:22:36.790424 1123601 start.go:935] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0929 12:22:36.790554 1123601 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0929 12:22:36.845625 1123601 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-09-29 12:22:36.835238116 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1040-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v0.1.40] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0929 12:22:36.845779 1123601 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0929 12:22:36.846351 1123601 start_flags.go:410] Using suggested 8000MB memory alloc based on sys=32093MB, container=32093MB
I0929 12:22:36.846505 1123601 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I0929 12:22:36.847882 1123601 out.go:179] * Using Docker driver with root privileges
I0929 12:22:36.848718 1123601 cni.go:84] Creating CNI manager for ""
I0929 12:22:36.848771 1123601 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0929 12:22:36.848780 1123601 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0929 12:22:36.848845 1123601 start.go:348] cluster config:
{Name:dockerenv-230733 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-230733 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0929 12:22:36.849864 1123601 out.go:179] * Starting "dockerenv-230733" primary control-plane node in "dockerenv-230733" cluster
I0929 12:22:36.850641 1123601 cache.go:123] Beginning downloading kic base image for docker with containerd
I0929 12:22:36.851490 1123601 out.go:179] * Pulling base image v0.0.48 ...
I0929 12:22:36.852426 1123601 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0929 12:22:36.852462 1123601 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21652-1097891/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0929 12:22:36.852470 1123601 cache.go:58] Caching tarball of preloaded images
I0929 12:22:36.852560 1123601 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0929 12:22:36.852580 1123601 preload.go:172] Found /home/jenkins/minikube-integration/21652-1097891/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0929 12:22:36.852586 1123601 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0929 12:22:36.852918 1123601 profile.go:143] Saving config to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/config.json ...
I0929 12:22:36.852935 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/config.json: {Name:mk26740fc2ea6e22a5ebeefb57f0d750315dc0d8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:36.873049 1123601 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0929 12:22:36.873061 1123601 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0929 12:22:36.873077 1123601 cache.go:232] Successfully downloaded all kic artifacts
I0929 12:22:36.873102 1123601 start.go:360] acquireMachinesLock for dockerenv-230733: {Name:mk0cf6cef2a3d724166af51253a11990193ed097 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0929 12:22:36.873200 1123601 start.go:364] duration metric: took 83.553µs to acquireMachinesLock for "dockerenv-230733"
I0929 12:22:36.873219 1123601 start.go:93] Provisioning new machine with config: &{Name:dockerenv-230733 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-230733 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0929 12:22:36.873274 1123601 start.go:125] createHost starting for "" (driver="docker")
I0929 12:22:36.874986 1123601 out.go:252] * Creating docker container (CPUs=2, Memory=8000MB) ...
I0929 12:22:36.875180 1123601 start.go:159] libmachine.API.Create for "dockerenv-230733" (driver="docker")
I0929 12:22:36.875205 1123601 client.go:168] LocalClient.Create starting
I0929 12:22:36.875295 1123601 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem
I0929 12:22:36.875322 1123601 main.go:141] libmachine: Decoding PEM data...
I0929 12:22:36.875336 1123601 main.go:141] libmachine: Parsing certificate...
I0929 12:22:36.875392 1123601 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/cert.pem
I0929 12:22:36.875406 1123601 main.go:141] libmachine: Decoding PEM data...
I0929 12:22:36.875412 1123601 main.go:141] libmachine: Parsing certificate...
I0929 12:22:36.875704 1123601 cli_runner.go:164] Run: docker network inspect dockerenv-230733 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0929 12:22:36.892386 1123601 cli_runner.go:211] docker network inspect dockerenv-230733 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0929 12:22:36.892453 1123601 network_create.go:284] running [docker network inspect dockerenv-230733] to gather additional debugging logs...
I0929 12:22:36.892470 1123601 cli_runner.go:164] Run: docker network inspect dockerenv-230733
W0929 12:22:36.909686 1123601 cli_runner.go:211] docker network inspect dockerenv-230733 returned with exit code 1
I0929 12:22:36.909709 1123601 network_create.go:287] error running [docker network inspect dockerenv-230733]: docker network inspect dockerenv-230733: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-230733 not found
I0929 12:22:36.909723 1123601 network_create.go:289] output of [docker network inspect dockerenv-230733]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-230733 not found
** /stderr **
I0929 12:22:36.909812 1123601 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0929 12:22:36.926550 1123601 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001a10f70}
I0929 12:22:36.926588 1123601 network_create.go:124] attempt to create docker network dockerenv-230733 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0929 12:22:36.926636 1123601 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-230733 dockerenv-230733
I0929 12:22:36.980722 1123601 network_create.go:108] docker network dockerenv-230733 192.168.49.0/24 created
I0929 12:22:36.980746 1123601 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-230733" container
I0929 12:22:36.980822 1123601 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0929 12:22:36.997181 1123601 cli_runner.go:164] Run: docker volume create dockerenv-230733 --label name.minikube.sigs.k8s.io=dockerenv-230733 --label created_by.minikube.sigs.k8s.io=true
I0929 12:22:37.014710 1123601 oci.go:103] Successfully created a docker volume dockerenv-230733
I0929 12:22:37.014794 1123601 cli_runner.go:164] Run: docker run --rm --name dockerenv-230733-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-230733 --entrypoint /usr/bin/test -v dockerenv-230733:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0929 12:22:37.409588 1123601 oci.go:107] Successfully prepared a docker volume dockerenv-230733
I0929 12:22:37.409647 1123601 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0929 12:22:37.409669 1123601 kic.go:194] Starting extracting preloaded images to volume ...
I0929 12:22:37.409753 1123601 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21652-1097891/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-230733:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0929 12:22:41.817558 1123601 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21652-1097891/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-230733:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.40775117s)
I0929 12:22:41.817584 1123601 kic.go:203] duration metric: took 4.407909917s to extract preloaded images to volume ...
W0929 12:22:41.817687 1123601 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0929 12:22:41.817719 1123601 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0929 12:22:41.817771 1123601 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0929 12:22:41.872927 1123601 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-230733 --name dockerenv-230733 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-230733 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-230733 --network dockerenv-230733 --ip 192.168.49.2 --volume dockerenv-230733:/var --security-opt apparmor=unconfined --memory=8000mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0929 12:22:42.119639 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Running}}
I0929 12:22:42.138183 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:22:42.156750 1123601 cli_runner.go:164] Run: docker exec dockerenv-230733 stat /var/lib/dpkg/alternatives/iptables
I0929 12:22:42.206322 1123601 oci.go:144] the created container "dockerenv-230733" has a running status.
I0929 12:22:42.206344 1123601 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa...
I0929 12:22:42.488450 1123601 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0929 12:22:42.512207 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:22:42.529211 1123601 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0929 12:22:42.529223 1123601 kic_runner.go:114] Args: [docker exec --privileged dockerenv-230733 chown docker:docker /home/docker/.ssh/authorized_keys]
I0929 12:22:42.576419 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:22:42.593202 1123601 machine.go:93] provisionDockerMachine start ...
I0929 12:22:42.593302 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:42.609771 1123601 main.go:141] libmachine: Using SSH client type: native
I0929 12:22:42.610068 1123601 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33266 <nil> <nil>}
I0929 12:22:42.610077 1123601 main.go:141] libmachine: About to run SSH command:
hostname
I0929 12:22:42.610709 1123601 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:37986->127.0.0.1:33266: read: connection reset by peer
I0929 12:22:45.750205 1123601 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-230733
I0929 12:22:45.750242 1123601 ubuntu.go:182] provisioning hostname "dockerenv-230733"
I0929 12:22:45.750319 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:45.769159 1123601 main.go:141] libmachine: Using SSH client type: native
I0929 12:22:45.769448 1123601 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33266 <nil> <nil>}
I0929 12:22:45.769459 1123601 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-230733 && echo "dockerenv-230733" | sudo tee /etc/hostname
I0929 12:22:45.918298 1123601 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-230733
I0929 12:22:45.918388 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:45.935860 1123601 main.go:141] libmachine: Using SSH client type: native
I0929 12:22:45.936105 1123601 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33266 <nil> <nil>}
I0929 12:22:45.936116 1123601 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-230733' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-230733/g' /etc/hosts;
else
echo '127.0.1.1 dockerenv-230733' | sudo tee -a /etc/hosts;
fi
fi
I0929 12:22:46.071386 1123601 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0929 12:22:46.071412 1123601 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21652-1097891/.minikube CaCertPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21652-1097891/.minikube}
I0929 12:22:46.071473 1123601 ubuntu.go:190] setting up certificates
I0929 12:22:46.071490 1123601 provision.go:84] configureAuth start
I0929 12:22:46.071565 1123601 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-230733
I0929 12:22:46.089364 1123601 provision.go:143] copyHostCerts
I0929 12:22:46.089419 1123601 exec_runner.go:144] found /home/jenkins/minikube-integration/21652-1097891/.minikube/cert.pem, removing ...
I0929 12:22:46.089429 1123601 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21652-1097891/.minikube/cert.pem
I0929 12:22:46.089514 1123601 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21652-1097891/.minikube/cert.pem (1123 bytes)
I0929 12:22:46.089653 1123601 exec_runner.go:144] found /home/jenkins/minikube-integration/21652-1097891/.minikube/key.pem, removing ...
I0929 12:22:46.089660 1123601 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21652-1097891/.minikube/key.pem
I0929 12:22:46.089697 1123601 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21652-1097891/.minikube/key.pem (1679 bytes)
I0929 12:22:46.089845 1123601 exec_runner.go:144] found /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.pem, removing ...
I0929 12:22:46.089853 1123601 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.pem
I0929 12:22:46.089887 1123601 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.pem (1078 bytes)
I0929 12:22:46.090015 1123601 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca-key.pem org=jenkins.dockerenv-230733 san=[127.0.0.1 192.168.49.2 dockerenv-230733 localhost minikube]
I0929 12:22:46.325627 1123601 provision.go:177] copyRemoteCerts
I0929 12:22:46.325687 1123601 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0929 12:22:46.325728 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:46.343797 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:22:46.442220 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0929 12:22:46.469896 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0929 12:22:46.495552 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0929 12:22:46.521669 1123601 provision.go:87] duration metric: took 450.142859ms to configureAuth
I0929 12:22:46.521695 1123601 ubuntu.go:206] setting minikube options for container-runtime
I0929 12:22:46.521862 1123601 config.go:182] Loaded profile config "dockerenv-230733": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0929 12:22:46.521873 1123601 machine.go:96] duration metric: took 3.928659417s to provisionDockerMachine
I0929 12:22:46.521879 1123601 client.go:171] duration metric: took 9.646669723s to LocalClient.Create
I0929 12:22:46.521900 1123601 start.go:167] duration metric: took 9.646720979s to libmachine.API.Create "dockerenv-230733"
I0929 12:22:46.521906 1123601 start.go:293] postStartSetup for "dockerenv-230733" (driver="docker")
I0929 12:22:46.521913 1123601 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0929 12:22:46.521972 1123601 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0929 12:22:46.522006 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:46.539560 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:22:46.640278 1123601 ssh_runner.go:195] Run: cat /etc/os-release
I0929 12:22:46.644135 1123601 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0929 12:22:46.644163 1123601 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0929 12:22:46.644174 1123601 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0929 12:22:46.644183 1123601 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0929 12:22:46.644194 1123601 filesync.go:126] Scanning /home/jenkins/minikube-integration/21652-1097891/.minikube/addons for local assets ...
I0929 12:22:46.644251 1123601 filesync.go:126] Scanning /home/jenkins/minikube-integration/21652-1097891/.minikube/files for local assets ...
I0929 12:22:46.644267 1123601 start.go:296] duration metric: took 122.356685ms for postStartSetup
I0929 12:22:46.644569 1123601 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-230733
I0929 12:22:46.662334 1123601 profile.go:143] Saving config to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/config.json ...
I0929 12:22:46.662631 1123601 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0929 12:22:46.662673 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:46.680490 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:22:46.775318 1123601 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0929 12:22:46.780067 1123601 start.go:128] duration metric: took 9.906775794s to createHost
I0929 12:22:46.780083 1123601 start.go:83] releasing machines lock for "dockerenv-230733", held for 9.906877166s
I0929 12:22:46.780175 1123601 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-230733
I0929 12:22:46.797552 1123601 ssh_runner.go:195] Run: cat /version.json
I0929 12:22:46.797590 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:46.797618 1123601 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0929 12:22:46.797688 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:22:46.815656 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:22:46.816364 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:22:46.907335 1123601 ssh_runner.go:195] Run: systemctl --version
I0929 12:22:46.979801 1123601 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0929 12:22:46.985013 1123601 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0929 12:22:47.015388 1123601 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0929 12:22:47.015454 1123601 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0929 12:22:47.044586 1123601 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0929 12:22:47.044604 1123601 start.go:495] detecting cgroup driver to use...
I0929 12:22:47.044638 1123601 detect.go:190] detected "systemd" cgroup driver on host os
I0929 12:22:47.044681 1123601 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0929 12:22:47.058689 1123601 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0929 12:22:47.071130 1123601 docker.go:218] disabling cri-docker service (if available) ...
I0929 12:22:47.071200 1123601 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0929 12:22:47.085862 1123601 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0929 12:22:47.100249 1123601 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0929 12:22:47.168650 1123601 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0929 12:22:47.236797 1123601 docker.go:234] disabling docker service ...
I0929 12:22:47.236852 1123601 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0929 12:22:47.255742 1123601 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0929 12:22:47.267676 1123601 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0929 12:22:47.340023 1123601 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0929 12:22:47.406837 1123601 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0929 12:22:47.419147 1123601 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0929 12:22:47.438330 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0929 12:22:47.450716 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0929 12:22:47.461723 1123601 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0929 12:22:47.461774 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0929 12:22:47.472876 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0929 12:22:47.483692 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0929 12:22:47.494681 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0929 12:22:47.505632 1123601 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0929 12:22:47.515629 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0929 12:22:47.526428 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0929 12:22:47.536799 1123601 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0929 12:22:47.547530 1123601 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0929 12:22:47.556751 1123601 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0929 12:22:47.566383 1123601 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0929 12:22:47.633613 1123601 ssh_runner.go:195] Run: sudo systemctl restart containerd
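Taken together, the sed edits above leave /etc/containerd/config.toml with CRI settings along these lines (a sketch of only the keys touched; the nesting is assumed from containerd 1.7's default config layout):

    # [plugins."io.containerd.grpc.v1.cri"]
    #   sandbox_image = "registry.k8s.io/pause:3.10.1"
    #   restrict_oom_score_adj = false
    #   enable_unprivileged_ports = true
    # [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    #   SystemdCgroup = true
    # spot-check from the host:
    docker exec dockerenv-230733 grep -E 'SystemdCgroup|sandbox_image|enable_unprivileged_ports' /etc/containerd/config.toml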
I0929 12:22:47.738193 1123601 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0929 12:22:47.738275 1123601 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0929 12:22:47.742471 1123601 start.go:563] Will wait 60s for crictl version
I0929 12:22:47.742517 1123601 ssh_runner.go:195] Run: which crictl
I0929 12:22:47.746007 1123601 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0929 12:22:47.780555 1123601 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0929 12:22:47.780614 1123601 ssh_runner.go:195] Run: containerd --version
I0929 12:22:47.804314 1123601 ssh_runner.go:195] Run: containerd --version
I0929 12:22:47.829387 1123601 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0929 12:22:47.830526 1123601 cli_runner.go:164] Run: docker network inspect dockerenv-230733 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0929 12:22:47.847165 1123601 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0929 12:22:47.851612 1123601 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0929 12:22:47.863679 1123601 kubeadm.go:875] updating cluster {Name:dockerenv-230733 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-230733 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0929 12:22:47.863779 1123601 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0929 12:22:47.863821 1123601 ssh_runner.go:195] Run: sudo crictl images --output json
I0929 12:22:47.898408 1123601 containerd.go:627] all images are preloaded for containerd runtime.
I0929 12:22:47.898421 1123601 containerd.go:534] Images already preloaded, skipping extraction
I0929 12:22:47.898476 1123601 ssh_runner.go:195] Run: sudo crictl images --output json
I0929 12:22:47.932883 1123601 containerd.go:627] all images are preloaded for containerd runtime.
I0929 12:22:47.932896 1123601 cache_images.go:85] Images are preloaded, skipping loading
I0929 12:22:47.932902 1123601 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0929 12:22:47.933037 1123601 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-230733 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:dockerenv-230733 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0929 12:22:47.933095 1123601 ssh_runner.go:195] Run: sudo crictl info
I0929 12:22:47.967936 1123601 cni.go:84] Creating CNI manager for ""
I0929 12:22:47.967949 1123601 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0929 12:22:47.967975 1123601 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0929 12:22:47.968000 1123601 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-230733 NodeName:dockerenv-230733 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0929 12:22:47.968129 1123601 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-230733"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0929 12:22:47.968188 1123601 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0929 12:22:47.978089 1123601 binaries.go:44] Found k8s binaries, skipping transfer
I0929 12:22:47.978136 1123601 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0929 12:22:47.987258 1123601 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0929 12:22:48.005878 1123601 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0929 12:22:48.027029 1123601 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2228 bytes)
I0929 12:22:48.045544 1123601 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0929 12:22:48.049172 1123601 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0929 12:22:48.060600 1123601 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0929 12:22:48.128710 1123601 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0929 12:22:48.151353 1123601 certs.go:68] Setting up /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733 for IP: 192.168.49.2
I0929 12:22:48.151365 1123601 certs.go:194] generating shared ca certs ...
I0929 12:22:48.151380 1123601 certs.go:226] acquiring lock for ca certs: {Name:mk80f04796163f71154dbe6468cabd937b3d9c9f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:48.151518 1123601 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.key
I0929 12:22:48.151549 1123601 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/proxy-client-ca.key
I0929 12:22:48.151554 1123601 certs.go:256] generating profile certs ...
I0929 12:22:48.151606 1123601 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.key
I0929 12:22:48.151621 1123601 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.crt with IP's: []
I0929 12:22:49.018750 1123601 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.crt ...
I0929 12:22:49.018771 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.crt: {Name:mkfd531eb57ba7103a4bdc4fc3115c379312c6d5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.019000 1123601 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.key ...
I0929 12:22:49.019017 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/client.key: {Name:mkf56b7bfb4a686b41218b2b5151f2e27a0db58b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.019155 1123601 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key.ba73ab82
I0929 12:22:49.019169 1123601 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt.ba73ab82 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0929 12:22:49.238547 1123601 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt.ba73ab82 ...
I0929 12:22:49.238566 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt.ba73ab82: {Name:mkf61b77eff3511633deca0f7067674cfda8f31b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.238736 1123601 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key.ba73ab82 ...
I0929 12:22:49.238746 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key.ba73ab82: {Name:mkc6833e5d089502612874c7e894425be9147069 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.238819 1123601 certs.go:381] copying /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt.ba73ab82 -> /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt
I0929 12:22:49.238914 1123601 certs.go:385] copying /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key.ba73ab82 -> /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key
I0929 12:22:49.238988 1123601 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.key
I0929 12:22:49.239001 1123601 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.crt with IP's: []
I0929 12:22:49.518744 1123601 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.crt ...
I0929 12:22:49.518761 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.crt: {Name:mk19576351b75f57f5ddc9ebf92c3d6408079f64 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.518925 1123601 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.key ...
I0929 12:22:49.518933 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.key: {Name:mk5a53ab29a0f10acf8e1e9667a30893e1d99648 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:22:49.519164 1123601 certs.go:484] found cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca-key.pem (1675 bytes)
I0929 12:22:49.519199 1123601 certs.go:484] found cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/ca.pem (1078 bytes)
I0929 12:22:49.519224 1123601 certs.go:484] found cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/cert.pem (1123 bytes)
I0929 12:22:49.519243 1123601 certs.go:484] found cert: /home/jenkins/minikube-integration/21652-1097891/.minikube/certs/key.pem (1679 bytes)
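[annotation] The certs.go/crypto.go steps above generate CA-signed profile certificates, with the apiserver cert carrying the IP SANs listed in the log (10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2). A minimal standalone sketch of that x509 flow in Go (illustrative only, not minikube's code; error handling elided for brevity):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Self-signed CA, standing in for minikubeCA.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf serving cert with the IP SANs shown for the apiserver cert above.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.49.2"),
		},
	}
	leafDER, _ := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
}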
I0929 12:22:49.519838 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0929 12:22:49.545718 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1671 bytes)
I0929 12:22:49.569728 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0929 12:22:49.593893 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0929 12:22:49.618455 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0929 12:22:49.642343 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0929 12:22:49.667283 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0929 12:22:49.691078 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/profiles/dockerenv-230733/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0929 12:22:49.714655 1123601 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21652-1097891/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0929 12:22:49.741396 1123601 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0929 12:22:49.760014 1123601 ssh_runner.go:195] Run: openssl version
I0929 12:22:49.766613 1123601 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0929 12:22:49.779823 1123601 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0929 12:22:49.784056 1123601 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 29 12:18 /usr/share/ca-certificates/minikubeCA.pem
I0929 12:22:49.784101 1123601 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0929 12:22:49.792267 1123601 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0929 12:22:49.802100 1123601 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0929 12:22:49.805503 1123601 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
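[annotation] The "likely first start" detection above reduces to treating a stat ENOENT as a first-start signal (minikube runs stat over SSH; the hypothetical local equivalent in Go):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// firstStart mirrors the check above: a missing apiserver-kubelet-client.crt
// means kubeadm has never initialized this node.
func firstStart(certPath string) bool {
	_, err := os.Stat(certPath)
	return errors.Is(err, fs.ErrNotExist)
}

func main() {
	fmt.Println(firstStart("/var/lib/minikube/certs/apiserver-kubelet-client.crt"))
}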
I0929 12:22:49.805542 1123601 kubeadm.go:392] StartCluster: {Name:dockerenv-230733 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-230733 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0929 12:22:49.805619 1123601 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0929 12:22:49.805660 1123601 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0929 12:22:49.840807 1123601 cri.go:89] found id: ""
I0929 12:22:49.840856 1123601 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0929 12:22:49.850348 1123601 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0929 12:22:49.859500 1123601 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0929 12:22:49.859541 1123601 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0929 12:22:49.868237 1123601 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0929 12:22:49.868244 1123601 kubeadm.go:157] found existing configuration files:
I0929 12:22:49.868277 1123601 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0929 12:22:49.876815 1123601 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0929 12:22:49.876849 1123601 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0929 12:22:49.885034 1123601 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0929 12:22:49.893642 1123601 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0929 12:22:49.893698 1123601 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0929 12:22:49.902332 1123601 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0929 12:22:49.911150 1123601 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0929 12:22:49.911187 1123601 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0929 12:22:49.919588 1123601 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0929 12:22:49.928155 1123601 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0929 12:22:49.928189 1123601 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0929 12:22:49.936446 1123601 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0929 12:22:49.974154 1123601 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0929 12:22:49.974238 1123601 kubeadm.go:310] [preflight] Running pre-flight checks
I0929 12:22:49.989080 1123601 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0929 12:22:49.989134 1123601 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1040-gcp
I0929 12:22:49.989182 1123601 kubeadm.go:310] OS: Linux
I0929 12:22:49.989220 1123601 kubeadm.go:310] CGROUPS_CPU: enabled
I0929 12:22:49.989303 1123601 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0929 12:22:49.989373 1123601 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0929 12:22:49.989413 1123601 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0929 12:22:49.989462 1123601 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0929 12:22:49.989506 1123601 kubeadm.go:310] CGROUPS_PIDS: enabled
I0929 12:22:49.989562 1123601 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0929 12:22:49.989621 1123601 kubeadm.go:310] CGROUPS_IO: enabled
I0929 12:22:50.040984 1123601 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0929 12:22:50.041130 1123601 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0929 12:22:50.041273 1123601 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0929 12:22:50.047393 1123601 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0929 12:22:50.049218 1123601 out.go:252] - Generating certificates and keys ...
I0929 12:22:50.049289 1123601 kubeadm.go:310] [certs] Using existing ca certificate authority
I0929 12:22:50.049343 1123601 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0929 12:22:50.192815 1123601 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0929 12:22:50.464124 1123601 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0929 12:22:50.852165 1123601 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0929 12:22:51.197748 1123601 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0929 12:22:51.579452 1123601 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0929 12:22:51.579580 1123601 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-230733 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0929 12:22:51.946140 1123601 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0929 12:22:51.946254 1123601 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-230733 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0929 12:22:52.195288 1123601 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0929 12:22:52.277455 1123601 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0929 12:22:52.705471 1123601 kubeadm.go:310] [certs] Generating "sa" key and public key
I0929 12:22:52.705527 1123601 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0929 12:22:53.046731 1123601 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0929 12:22:53.296477 1123601 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0929 12:22:53.512297 1123601 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0929 12:22:53.844404 1123601 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0929 12:22:53.920091 1123601 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0929 12:22:53.920515 1123601 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0929 12:22:53.924346 1123601 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0929 12:22:53.925777 1123601 out.go:252] - Booting up control plane ...
I0929 12:22:53.925871 1123601 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0929 12:22:53.925987 1123601 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0929 12:22:53.926812 1123601 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0929 12:22:53.937150 1123601 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0929 12:22:53.937280 1123601 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0929 12:22:53.944178 1123601 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0929 12:22:53.944444 1123601 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0929 12:22:53.944502 1123601 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0929 12:22:54.022049 1123601 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0929 12:22:54.022168 1123601 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0929 12:22:55.023880 1123601 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001909197s
I0929 12:22:55.026934 1123601 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0929 12:22:55.027120 1123601 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0929 12:22:55.027253 1123601 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0929 12:22:55.027368 1123601 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0929 12:22:56.969944 1123601 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 1.942883321s
I0929 12:22:57.010513 1123601 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 1.983280229s
I0929 12:22:58.528305 1123601 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 3.501252771s
I0929 12:22:58.538291 1123601 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0929 12:22:58.547688 1123601 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0929 12:22:58.555715 1123601 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0929 12:22:58.555950 1123601 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-230733 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0929 12:22:58.565162 1123601 kubeadm.go:310] [bootstrap-token] Using token: 7fnxcj.lx7vmmfllrfp6tby
I0929 12:22:58.566469 1123601 out.go:252] - Configuring RBAC rules ...
I0929 12:22:58.566607 1123601 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0929 12:22:58.569852 1123601 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0929 12:22:58.576186 1123601 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0929 12:22:58.578660 1123601 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0929 12:22:58.581077 1123601 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0929 12:22:58.583380 1123601 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0929 12:22:58.934770 1123601 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0929 12:22:59.355326 1123601 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0929 12:22:59.935075 1123601 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0929 12:22:59.935958 1123601 kubeadm.go:310]
I0929 12:22:59.936075 1123601 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0929 12:22:59.936085 1123601 kubeadm.go:310]
I0929 12:22:59.936185 1123601 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0929 12:22:59.936191 1123601 kubeadm.go:310]
I0929 12:22:59.936230 1123601 kubeadm.go:310] mkdir -p $HOME/.kube
I0929 12:22:59.936303 1123601 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0929 12:22:59.936373 1123601 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0929 12:22:59.936379 1123601 kubeadm.go:310]
I0929 12:22:59.936454 1123601 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0929 12:22:59.936459 1123601 kubeadm.go:310]
I0929 12:22:59.936505 1123601 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0929 12:22:59.936511 1123601 kubeadm.go:310]
I0929 12:22:59.936550 1123601 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0929 12:22:59.936638 1123601 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0929 12:22:59.936729 1123601 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0929 12:22:59.936736 1123601 kubeadm.go:310]
I0929 12:22:59.936813 1123601 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0929 12:22:59.936928 1123601 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0929 12:22:59.936932 1123601 kubeadm.go:310]
I0929 12:22:59.937021 1123601 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 7fnxcj.lx7vmmfllrfp6tby \
I0929 12:22:59.937160 1123601 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:e31917eb19b7c0879803010df843d835ccee1dda0a35b4d1611c13a53effe46e \
I0929 12:22:59.937186 1123601 kubeadm.go:310] --control-plane
I0929 12:22:59.937194 1123601 kubeadm.go:310]
I0929 12:22:59.937272 1123601 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0929 12:22:59.937275 1123601 kubeadm.go:310]
I0929 12:22:59.937346 1123601 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 7fnxcj.lx7vmmfllrfp6tby \
I0929 12:22:59.937434 1123601 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:e31917eb19b7c0879803010df843d835ccee1dda0a35b4d1611c13a53effe46e
I0929 12:22:59.940294 1123601 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1040-gcp\n", err: exit status 1
I0929 12:22:59.940398 1123601 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0929 12:22:59.940423 1123601 cni.go:84] Creating CNI manager for ""
I0929 12:22:59.940431 1123601 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0929 12:22:59.942423 1123601 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0929 12:22:59.943581 1123601 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0929 12:22:59.948029 1123601 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0929 12:22:59.948038 1123601 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0929 12:22:59.968358 1123601 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0929 12:23:00.181080 1123601 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0929 12:23:00.181187 1123601 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0929 12:23:00.181222 1123601 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-230733 minikube.k8s.io/updated_at=2025_09_29T12_23_00_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=aad2f46d67652a73456765446faac83429b43d5e minikube.k8s.io/name=dockerenv-230733 minikube.k8s.io/primary=true
I0929 12:23:00.190047 1123601 ops.go:34] apiserver oom_adj: -16
I0929 12:23:00.257957 1123601 kubeadm.go:1105] duration metric: took 76.861142ms to wait for elevateKubeSystemPrivileges
I0929 12:23:00.265979 1123601 kubeadm.go:394] duration metric: took 10.460414929s to StartCluster
I0929 12:23:00.266019 1123601 settings.go:142] acquiring lock: {Name:mk967ab7b412f5ea13a8bdbc3d08e00d0ec4417f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:23:00.266096 1123601 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21652-1097891/kubeconfig
I0929 12:23:00.266799 1123601 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21652-1097891/kubeconfig: {Name:mk343611c88fd6ad36810bb377f9a0ca463784db Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0929 12:23:00.267130 1123601 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0929 12:23:00.267145 1123601 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0929 12:23:00.267233 1123601 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0929 12:23:00.267342 1123601 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-230733"
I0929 12:23:00.267365 1123601 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-230733"
I0929 12:23:00.267375 1123601 addons.go:69] Setting default-storageclass=true in profile "dockerenv-230733"
I0929 12:23:00.267389 1123601 config.go:182] Loaded profile config "dockerenv-230733": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0929 12:23:00.267420 1123601 host.go:66] Checking if "dockerenv-230733" exists ...
I0929 12:23:00.267423 1123601 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-230733"
I0929 12:23:00.267982 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:23:00.268044 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:23:00.268696 1123601 out.go:179] * Verifying Kubernetes components...
I0929 12:23:00.269918 1123601 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0929 12:23:00.293184 1123601 addons.go:238] Setting addon default-storageclass=true in "dockerenv-230733"
I0929 12:23:00.293219 1123601 host.go:66] Checking if "dockerenv-230733" exists ...
I0929 12:23:00.293592 1123601 cli_runner.go:164] Run: docker container inspect dockerenv-230733 --format={{.State.Status}}
I0929 12:23:00.293722 1123601 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0929 12:23:00.294987 1123601 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0929 12:23:00.295000 1123601 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0929 12:23:00.295058 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:23:00.324469 1123601 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0929 12:23:00.324481 1123601 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0929 12:23:00.324531 1123601 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-230733
I0929 12:23:00.325460 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:23:00.346619 1123601 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33266 SSHKeyPath:/home/jenkins/minikube-integration/21652-1097891/.minikube/machines/dockerenv-230733/id_rsa Username:docker}
I0929 12:23:00.354779 1123601 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0929 12:23:00.396849 1123601 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0929 12:23:00.441129 1123601 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0929 12:23:00.461208 1123601 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0929 12:23:00.518941 1123601 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
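[annotation] The sed pipeline above edits the CoreDNS ConfigMap in place: it inserts a log directive before errors and a hosts block before the forward directive. The resulting Corefile fragment would look roughly like this (stock kubeadm directives elided; only the two insertions are taken from the log):

.:53 {
    log
    errors
    ...
    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
    ...
}

The fallthrough keeps unresolved names flowing to the remaining plugins, so only host.minikube.internal is answered from the hosts block.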
I0929 12:23:00.520010 1123601 api_server.go:52] waiting for apiserver process to appear ...
I0929 12:23:00.520070 1123601 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0929 12:23:00.723750 1123601 api_server.go:72] duration metric: took 456.568333ms to wait for apiserver process to appear ...
I0929 12:23:00.723773 1123601 api_server.go:88] waiting for apiserver healthz status ...
I0929 12:23:00.723797 1123601 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0929 12:23:00.730463 1123601 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
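[annotation] The healthz wait above is an HTTPS GET against the apiserver repeated until it returns 200. A minimal sketch of that probe in Go (illustrative; minikube's api_server.go also handles cluster CA trust and richer timeout logic):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Skip cert verification for brevity only; a real client should
	// trust the cluster CA (/var/lib/minikube/certs/ca.crt) instead.
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(60 * time.Second)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.49.2:8443/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("healthz: ok")
				return
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("healthz: timed out")
}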
I0929 12:23:00.731403 1123601 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0929 12:23:00.731457 1123601 api_server.go:141] control plane version: v1.34.0
I0929 12:23:00.731471 1123601 api_server.go:131] duration metric: took 7.692466ms to wait for apiserver health ...
I0929 12:23:00.731479 1123601 system_pods.go:43] waiting for kube-system pods to appear ...
I0929 12:23:00.732909 1123601 addons.go:514] duration metric: took 465.684015ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0929 12:23:00.736058 1123601 system_pods.go:59] 5 kube-system pods found
I0929 12:23:00.736107 1123601 system_pods.go:61] "etcd-dockerenv-230733" [4e5d069f-0c6d-42e0-9438-8cc3c9f00109] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0929 12:23:00.736115 1123601 system_pods.go:61] "kube-apiserver-dockerenv-230733" [87d1fb6f-d90e-4a9b-bdf2-b0ea97f95ee9] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0929 12:23:00.736122 1123601 system_pods.go:61] "kube-controller-manager-dockerenv-230733" [b7c91a8e-7c40-4135-817e-33d04e6a5fb6] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0929 12:23:00.736127 1123601 system_pods.go:61] "kube-scheduler-dockerenv-230733" [1a810e4b-b562-4616-9461-7451ca265073] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0929 12:23:00.736130 1123601 system_pods.go:61] "storage-provisioner" [f94b79a4-7245-46ba-bfd1-0aaa2d1710e1] Pending
I0929 12:23:00.736135 1123601 system_pods.go:74] duration metric: took 4.65169ms to wait for pod list to return data ...
I0929 12:23:00.736147 1123601 kubeadm.go:578] duration metric: took 468.972438ms to wait for: map[apiserver:true system_pods:true]
I0929 12:23:00.736164 1123601 node_conditions.go:102] verifying NodePressure condition ...
I0929 12:23:00.739093 1123601 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0929 12:23:00.739114 1123601 node_conditions.go:123] node cpu capacity is 8
I0929 12:23:00.739124 1123601 node_conditions.go:105] duration metric: took 2.956422ms to run NodePressure ...
I0929 12:23:00.739135 1123601 start.go:241] waiting for startup goroutines ...
I0929 12:23:01.023632 1123601 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-230733" context rescaled to 1 replicas
I0929 12:23:01.023668 1123601 start.go:246] waiting for cluster config update ...
I0929 12:23:01.023682 1123601 start.go:255] writing updated cluster config ...
I0929 12:23:01.024018 1123601 ssh_runner.go:195] Run: rm -f paused
I0929 12:23:01.077474 1123601 start.go:623] kubectl: 1.34.1, cluster: 1.34.0 (minor skew: 0)
I0929 12:23:01.079302 1123601 out.go:179] * Done! kubectl is now configured to use "dockerenv-230733" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
ad1d104946cab 409467f978b4a 10 seconds ago Running kindnet-cni 0 f8516c757f710 kindnet-5qdnx
57860c205f919 df0860106674d 11 seconds ago Running kube-proxy 0 a080a7855ba00 kube-proxy-9jcqr
c84c28f31a71e 6e38f40d628db 11 seconds ago Running storage-provisioner 0 5b4b35311d33e storage-provisioner
a6b8668c39546 a0af72f2ec6d6 21 seconds ago Running kube-controller-manager 0 38c3cebb83a29 kube-controller-manager-dockerenv-230733
e6f6f5148dc09 46169d968e920 21 seconds ago Running kube-scheduler 0 292fb3084848a kube-scheduler-dockerenv-230733
212a1544b95df 5f1f5298c888d 21 seconds ago Running etcd 0 8127944541a24 etcd-dockerenv-230733
0f2304e212803 90550c43ad2bc 21 seconds ago Running kube-apiserver 0 f8ff66eec5e92 kube-apiserver-dockerenv-230733
==> containerd <==
Sep 29 12:22:55 dockerenv-230733 containerd[759]: time="2025-09-29T12:22:55.257323390Z" level=info msg="StartContainer for \"0f2304e212803d48fc789e13fa16dc16c548aacdce878325e9c00a75f331415a\" returns successfully"
Sep 29 12:22:55 dockerenv-230733 containerd[759]: time="2025-09-29T12:22:55.263648869Z" level=info msg="StartContainer for \"212a1544b95dfd4d23fc7ea050c88ba58bff823adff2ab036ba58523b38dade0\" returns successfully"
Sep 29 12:22:55 dockerenv-230733 containerd[759]: time="2025-09-29T12:22:55.272916135Z" level=info msg="StartContainer for \"e6f6f5148dc095031ae5cd99f9e34989e438be96b9689af9bb098f7699f1c252\" returns successfully"
Sep 29 12:22:55 dockerenv-230733 containerd[759]: time="2025-09-29T12:22:55.293574755Z" level=info msg="StartContainer for \"a6b8668c39546baf4f25d322cd9e313e33e41b70d86993dc8dd2955b1db6d1a0\" returns successfully"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.077325568Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:f94b79a4-7245-46ba-bfd1-0aaa2d1710e1,Namespace:kube-system,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.168629743Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:f94b79a4-7245-46ba-bfd1-0aaa2d1710e1,Namespace:kube-system,Attempt:0,} returns sandbox id \"5b4b35311d33e039863e7826437d967a1b0194b4dd2cdf97ce4657cd857ab23e\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.178223900Z" level=info msg="CreateContainer within sandbox \"5b4b35311d33e039863e7826437d967a1b0194b4dd2cdf97ce4657cd857ab23e\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.189581272Z" level=info msg="CreateContainer within sandbox \"5b4b35311d33e039863e7826437d967a1b0194b4dd2cdf97ce4657cd857ab23e\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"c84c28f31a71ec98fac1a792f3163c0f3fe59a78d4b5383106a7da0a115fa313\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.190304919Z" level=info msg="StartContainer for \"c84c28f31a71ec98fac1a792f3163c0f3fe59a78d4b5383106a7da0a115fa313\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.256043337Z" level=info msg="StartContainer for \"c84c28f31a71ec98fac1a792f3163c0f3fe59a78d4b5383106a7da0a115fa313\" returns successfully"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.452777184Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-9jcqr,Uid:7a6834b6-692f-402a-8c80-fad81170db86,Namespace:kube-system,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.456210226Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-5qdnx,Uid:a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5,Namespace:kube-system,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.520785448Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-9jcqr,Uid:7a6834b6-692f-402a-8c80-fad81170db86,Namespace:kube-system,Attempt:0,} returns sandbox id \"a080a7855ba00a6c2d3cfb1c69a32f2f6e8e03043cd4ae326f24251c3593892f\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.526562108Z" level=info msg="CreateContainer within sandbox \"a080a7855ba00a6c2d3cfb1c69a32f2f6e8e03043cd4ae326f24251c3593892f\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.536856487Z" level=info msg="CreateContainer within sandbox \"a080a7855ba00a6c2d3cfb1c69a32f2f6e8e03043cd4ae326f24251c3593892f\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"57860c205f9190face9cf7183d878cc0936bc156dfe8288af8be0756fa78e6b6\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.537528439Z" level=info msg="StartContainer for \"57860c205f9190face9cf7183d878cc0936bc156dfe8288af8be0756fa78e6b6\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.550114678Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-n625v,Uid:1c159786-cc99-400f-ad63-6c033da90abc,Namespace:kube-system,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.573276520Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-n625v,Uid:1c159786-cc99-400f-ad63-6c033da90abc,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\": failed to find network info for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.614619545Z" level=info msg="StartContainer for \"57860c205f9190face9cf7183d878cc0936bc156dfe8288af8be0756fa78e6b6\" returns successfully"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.775816174Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-5qdnx,Uid:a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5,Namespace:kube-system,Attempt:0,} returns sandbox id \"f8516c757f710a78b1c3040806f8f36a4fa49fc45c655ed67b89fd50b091c24f\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.781258449Z" level=info msg="CreateContainer within sandbox \"f8516c757f710a78b1c3040806f8f36a4fa49fc45c655ed67b89fd50b091c24f\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.793115306Z" level=info msg="CreateContainer within sandbox \"f8516c757f710a78b1c3040806f8f36a4fa49fc45c655ed67b89fd50b091c24f\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"ad1d104946cabf789b9e0cb715c46cbf6636cc68b0b131ce944e7cb2c7ae487e\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.793881751Z" level=info msg="StartContainer for \"ad1d104946cabf789b9e0cb715c46cbf6636cc68b0b131ce944e7cb2c7ae487e\""
Sep 29 12:23:05 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:05.914636920Z" level=info msg="StartContainer for \"ad1d104946cabf789b9e0cb715c46cbf6636cc68b0b131ce944e7cb2c7ae487e\" returns successfully"
Sep 29 12:23:09 dockerenv-230733 containerd[759]: time="2025-09-29T12:23:09.619909612Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
==> describe nodes <==
Name: dockerenv-230733
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=dockerenv-230733
kubernetes.io/os=linux
minikube.k8s.io/commit=aad2f46d67652a73456765446faac83429b43d5e
minikube.k8s.io/name=dockerenv-230733
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_29T12_23_00_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 29 Sep 2025 12:22:57 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-230733
AcquireTime: <unset>
RenewTime: Mon, 29 Sep 2025 12:23:09 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 29 Sep 2025 12:23:09 +0000 Mon, 29 Sep 2025 12:22:55 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 29 Sep 2025 12:23:09 +0000 Mon, 29 Sep 2025 12:22:55 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 29 Sep 2025 12:23:09 +0000 Mon, 29 Sep 2025 12:22:55 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 29 Sep 2025 12:23:09 +0000 Mon, 29 Sep 2025 12:22:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-230733
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
System Info:
Machine ID: 04ac7f0dcfc64bc89122042ba46c8e6a
System UUID: 0e906b12-24ac-4077-8192-7fb19f0625db
Boot ID: c950b162-3ea4-4410-8c2e-1238f18b29b9
Kernel Version: 6.8.0-1040-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-n625v 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 11s
kube-system etcd-dockerenv-230733 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 17s
kube-system kindnet-5qdnx 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 11s
kube-system kube-apiserver-dockerenv-230733 250m (3%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-controller-manager-dockerenv-230733 200m (2%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-proxy-9jcqr 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system kube-scheduler-dockerenv-230733 100m (1%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 16s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ----               ----             -------
  Normal  Starting                 10s                kube-proxy
  Normal  Starting                 22s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  22s (x8 over 22s)  kubelet          Node dockerenv-230733 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    22s (x8 over 22s)  kubelet          Node dockerenv-230733 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     22s (x7 over 22s)  kubelet          Node dockerenv-230733 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  22s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 17s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  17s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  17s                kubelet          Node dockerenv-230733 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    17s                kubelet          Node dockerenv-230733 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     17s                kubelet          Node dockerenv-230733 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           13s                node-controller  Node dockerenv-230733 event: Registered Node dockerenv-230733 in Controller
==> dmesg <==
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 02 e7 e8 51 10 6b 08 06
[ +1.517728] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff ae a5 e4 37 95 62 08 06
[ +0.115888] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 5a 81 e5 e6 16 48 08 06
[ +12.890125] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 92 a3 59 25 5e a0 08 06
[ +0.000394] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 02 e7 e8 51 10 6b 08 06
[ +5.179291] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 3e f5 e3 4f f3 1f 08 06
[Sep29 12:15] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 0e 41 b4 9f 67 06 08 06
[ +13.445656] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff b2 1e 7c f1 b5 0d 08 06
[ +0.000381] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 5a 81 e5 e6 16 48 08 06
[ +7.699318] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 66 ba 46 0d 66 00 08 06
[ +0.000403] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 0e 41 b4 9f 67 06 08 06
[ +4.637857] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff d6 16 6b 9e 59 3c 08 06
[ +0.000369] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 3e f5 e3 4f f3 1f 08 06
==> etcd [212a1544b95dfd4d23fc7ea050c88ba58bff823adff2ab036ba58523b38dade0] <==
{"level":"warn","ts":"2025-09-29T12:22:56.275099Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37172","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.281632Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37194","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.290458Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37218","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.297345Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37232","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.304445Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37240","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.311442Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37252","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.325005Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37278","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.331622Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37296","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.339009Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37324","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.345678Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37340","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.354010Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37352","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.360468Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37368","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.367340Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37394","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.373949Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37412","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.380740Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37436","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.388928Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37456","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.395680Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37486","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.403732Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37506","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.410795Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37520","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.417941Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37538","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.424860Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37560","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.435338Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37570","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.441925Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37592","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.448427Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37610","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-29T12:22:56.498043Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:37620","server-name":"","error":"EOF"}
==> kernel <==
12:23:16 up 5:05, 0 users, load average: 1.39, 1.86, 2.46
Linux dockerenv-230733 6.8.0-1040-gcp #42~22.04.1-Ubuntu SMP Tue Sep 9 13:30:57 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [ad1d104946cabf789b9e0cb715c46cbf6636cc68b0b131ce944e7cb2c7ae487e] <==
I0929 12:23:06.160006 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0929 12:23:06.160295 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0929 12:23:06.160488 1 main.go:148] setting mtu 1500 for CNI
I0929 12:23:06.160508 1 main.go:178] kindnetd IP family: "ipv4"
I0929 12:23:06.160543 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-09-29T12:23:06Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I0929 12:23:06.360306 1 controller.go:377] "Starting controller" name="kube-network-policies"
I0929 12:23:06.360419 1 controller.go:381] "Waiting for informer caches to sync"
I0929 12:23:06.360441 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I0929 12:23:06.360933 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I0929 12:23:06.760840 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I0929 12:23:06.760870 1 metrics.go:72] Registering metrics
I0929 12:23:06.858495 1 controller.go:711] "Syncing nftables rules"
I0929 12:23:16.362103 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0929 12:23:16.362147 1 main.go:301] handling current node
==> kube-apiserver [0f2304e212803d48fc789e13fa16dc16c548aacdce878325e9c00a75f331415a] <==
I0929 12:22:57.003351 1 cache.go:39] Caches are synced for RemoteAvailability controller
I0929 12:22:57.003820 1 cache.go:39] Caches are synced for LocalAvailability controller
I0929 12:22:57.004588 1 controller.go:667] quota admission added evaluator for: namespaces
I0929 12:22:57.009780 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I0929 12:22:57.015712 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I0929 12:22:57.030825 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I0929 12:22:57.046718 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer"
I0929 12:22:57.204565 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0929 12:22:57.907756 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0929 12:22:57.914171 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0929 12:22:57.914194 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0929 12:22:58.377140 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0929 12:22:58.412167 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0929 12:22:58.511838 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0929 12:22:58.518050 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0929 12:22:58.519365 1 controller.go:667] quota admission added evaluator for: endpoints
I0929 12:22:58.524703 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0929 12:22:59.022695 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0929 12:22:59.341482 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0929 12:22:59.354314 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0929 12:22:59.361755 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0929 12:23:04.728235 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0929 12:23:04.733248 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0929 12:23:05.074342 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0929 12:23:05.125191 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [a6b8668c39546baf4f25d322cd9e313e33e41b70d86993dc8dd2955b1db6d1a0] <==
I0929 12:23:04.006799 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0929 12:23:04.006821 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0929 12:23:04.006830 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I0929 12:23:04.020438 1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
I0929 12:23:04.020562 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I0929 12:23:04.021581 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I0929 12:23:04.021603 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I0929 12:23:04.021627 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I0929 12:23:04.021637 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I0929 12:23:04.021669 1 shared_informer.go:356] "Caches are synced" controller="service account"
I0929 12:23:04.021674 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0929 12:23:04.021775 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0929 12:23:04.021787 1 shared_informer.go:356] "Caches are synced" controller="daemon sets"
I0929 12:23:04.021864 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I0929 12:23:04.022033 1 shared_informer.go:356] "Caches are synced" controller="ephemeral"
I0929 12:23:04.023004 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I0929 12:23:04.023034 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I0929 12:23:04.023106 1 shared_informer.go:356] "Caches are synced" controller="GC"
I0929 12:23:04.023137 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0929 12:23:04.023111 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0929 12:23:04.023158 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0929 12:23:04.024246 1 shared_informer.go:356] "Caches are synced" controller="expand"
I0929 12:23:04.027568 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0929 12:23:04.038065 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0929 12:23:04.044110 1 shared_informer.go:356] "Caches are synced" controller="namespace"
==> kube-proxy [57860c205f9190face9cf7183d878cc0936bc156dfe8288af8be0756fa78e6b6] <==
I0929 12:23:05.645945 1 server_linux.go:53] "Using iptables proxy"
I0929 12:23:05.703825 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0929 12:23:05.804035 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0929 12:23:05.804074 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0929 12:23:05.804202 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0929 12:23:05.828921 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0929 12:23:05.828992 1 server_linux.go:132] "Using iptables Proxier"
I0929 12:23:05.833881 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0929 12:23:05.834295 1 server.go:527] "Version info" version="v1.34.0"
I0929 12:23:05.834330 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0929 12:23:05.835590 1 config.go:106] "Starting endpoint slice config controller"
I0929 12:23:05.835627 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0929 12:23:05.835678 1 config.go:200] "Starting service config controller"
I0929 12:23:05.835684 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0929 12:23:05.835695 1 config.go:309] "Starting node config controller"
I0929 12:23:05.835708 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0929 12:23:05.835716 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0929 12:23:05.835725 1 config.go:403] "Starting serviceCIDR config controller"
I0929 12:23:05.835731 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0929 12:23:05.935760 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0929 12:23:05.935808 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0929 12:23:05.935832 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-scheduler [e6f6f5148dc095031ae5cd99f9e34989e438be96b9689af9bb098f7699f1c252] <==
E0929 12:22:56.966668 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0929 12:22:56.966757 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0929 12:22:56.966855 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0929 12:22:56.966887 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0929 12:22:56.967945 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0929 12:22:56.968887 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0929 12:22:56.969098 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0929 12:22:56.969194 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0929 12:22:56.969273 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0929 12:22:56.969356 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0929 12:22:56.969457 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0929 12:22:56.969548 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0929 12:22:56.969553 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0929 12:22:56.969608 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0929 12:22:56.969674 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0929 12:22:56.969754 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0929 12:22:57.835160 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0929 12:22:57.851407 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0929 12:22:57.855552 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0929 12:22:57.988626 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0929 12:22:58.004914 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0929 12:22:58.037141 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0929 12:22:58.131518 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0929 12:22:58.151736 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
I0929 12:23:00.563690 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Sep 29 12:23:00 dockerenv-230733 kubelet[1534]: I0929 12:23:00.222551 1534 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-dockerenv-230733" podStartSLOduration=1.222530439 podStartE2EDuration="1.222530439s" podCreationTimestamp="2025-09-29 12:22:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 12:23:00.222530374 +0000 UTC m=+1.158797064" watchObservedRunningTime="2025-09-29 12:23:00.222530439 +0000 UTC m=+1.158797123"
Sep 29 12:23:04 dockerenv-230733 kubelet[1534]: I0929 12:23:04.268587 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqdk9\" (UniqueName: \"kubernetes.io/projected/f94b79a4-7245-46ba-bfd1-0aaa2d1710e1-kube-api-access-zqdk9\") pod \"storage-provisioner\" (UID: \"f94b79a4-7245-46ba-bfd1-0aaa2d1710e1\") " pod="kube-system/storage-provisioner"
Sep 29 12:23:04 dockerenv-230733 kubelet[1534]: I0929 12:23:04.268649 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/f94b79a4-7245-46ba-bfd1-0aaa2d1710e1-tmp\") pod \"storage-provisioner\" (UID: \"f94b79a4-7245-46ba-bfd1-0aaa2d1710e1\") " pod="kube-system/storage-provisioner"
Sep 29 12:23:04 dockerenv-230733 kubelet[1534]: E0929 12:23:04.375163 1534 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 29 12:23:04 dockerenv-230733 kubelet[1534]: E0929 12:23:04.375203 1534 projected.go:196] Error preparing data for projected volume kube-api-access-zqdk9 for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 29 12:23:04 dockerenv-230733 kubelet[1534]: E0929 12:23:04.375303 1534 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f94b79a4-7245-46ba-bfd1-0aaa2d1710e1-kube-api-access-zqdk9 podName:f94b79a4-7245-46ba-bfd1-0aaa2d1710e1 nodeName:}" failed. No retries permitted until 2025-09-29 12:23:04.875270692 +0000 UTC m=+5.811537375 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-zqdk9" (UniqueName: "kubernetes.io/projected/f94b79a4-7245-46ba-bfd1-0aaa2d1710e1-kube-api-access-zqdk9") pod "storage-provisioner" (UID: "f94b79a4-7245-46ba-bfd1-0aaa2d1710e1") : configmap "kube-root-ca.crt" not found
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174118 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/7a6834b6-692f-402a-8c80-fad81170db86-xtables-lock\") pod \"kube-proxy-9jcqr\" (UID: \"7a6834b6-692f-402a-8c80-fad81170db86\") " pod="kube-system/kube-proxy-9jcqr"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174216 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5-cni-cfg\") pod \"kindnet-5qdnx\" (UID: \"a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5\") " pod="kube-system/kindnet-5qdnx"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174303 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/7a6834b6-692f-402a-8c80-fad81170db86-kube-proxy\") pod \"kube-proxy-9jcqr\" (UID: \"7a6834b6-692f-402a-8c80-fad81170db86\") " pod="kube-system/kube-proxy-9jcqr"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174337 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7a6834b6-692f-402a-8c80-fad81170db86-lib-modules\") pod \"kube-proxy-9jcqr\" (UID: \"7a6834b6-692f-402a-8c80-fad81170db86\") " pod="kube-system/kube-proxy-9jcqr"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174379 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5-xtables-lock\") pod \"kindnet-5qdnx\" (UID: \"a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5\") " pod="kube-system/kindnet-5qdnx"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174407 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5-lib-modules\") pod \"kindnet-5qdnx\" (UID: \"a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5\") " pod="kube-system/kindnet-5qdnx"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174443 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prhmn\" (UniqueName: \"kubernetes.io/projected/a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5-kube-api-access-prhmn\") pod \"kindnet-5qdnx\" (UID: \"a7d4bee5-e0cb-4861-a3d3-423e8a3dc9c5\") " pod="kube-system/kindnet-5qdnx"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.174936 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsbp7\" (UniqueName: \"kubernetes.io/projected/7a6834b6-692f-402a-8c80-fad81170db86-kube-api-access-bsbp7\") pod \"kube-proxy-9jcqr\" (UID: \"7a6834b6-692f-402a-8c80-fad81170db86\") " pod="kube-system/kube-proxy-9jcqr"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.276032 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c159786-cc99-400f-ad63-6c033da90abc-config-volume\") pod \"coredns-66bc5c9577-n625v\" (UID: \"1c159786-cc99-400f-ad63-6c033da90abc\") " pod="kube-system/coredns-66bc5c9577-n625v"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: I0929 12:23:05.276075 1534 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kht9g\" (UniqueName: \"kubernetes.io/projected/1c159786-cc99-400f-ad63-6c033da90abc-kube-api-access-kht9g\") pod \"coredns-66bc5c9577-n625v\" (UID: \"1c159786-cc99-400f-ad63-6c033da90abc\") " pod="kube-system/coredns-66bc5c9577-n625v"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: E0929 12:23:05.573584 1534 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\": failed to find network info for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\""
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: E0929 12:23:05.573712 1534 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\": failed to find network info for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\"" pod="kube-system/coredns-66bc5c9577-n625v"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: E0929 12:23:05.573743 1534 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\": failed to find network info for sandbox \"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\"" pod="kube-system/coredns-66bc5c9577-n625v"
Sep 29 12:23:05 dockerenv-230733 kubelet[1534]: E0929 12:23:05.573849 1534 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-n625v_kube-system(1c159786-cc99-400f-ad63-6c033da90abc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-n625v_kube-system(1c159786-cc99-400f-ad63-6c033da90abc)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\\\": failed to find network info for sandbox \\\"22322ab230569bf6582d26b7f534aadd52cf5d978e0efca32e2d45ddc995b96b\\\"\"" pod="kube-system/coredns-66bc5c9577-n625v" podUID="1c159786-cc99-400f-ad63-6c033da90abc"
Sep 29 12:23:06 dockerenv-230733 kubelet[1534]: I0929 12:23:06.199255 1534 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=6.199233052 podStartE2EDuration="6.199233052s" podCreationTimestamp="2025-09-29 12:23:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 12:23:06.19879098 +0000 UTC m=+7.135057651" watchObservedRunningTime="2025-09-29 12:23:06.199233052 +0000 UTC m=+7.135499741"
Sep 29 12:23:06 dockerenv-230733 kubelet[1534]: I0929 12:23:06.208528 1534 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-9jcqr" podStartSLOduration=1.208507528 podStartE2EDuration="1.208507528s" podCreationTimestamp="2025-09-29 12:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 12:23:06.208256584 +0000 UTC m=+7.144523274" watchObservedRunningTime="2025-09-29 12:23:06.208507528 +0000 UTC m=+7.144774213"
Sep 29 12:23:06 dockerenv-230733 kubelet[1534]: I0929 12:23:06.219385 1534 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-5qdnx" podStartSLOduration=1.219365342 podStartE2EDuration="1.219365342s" podCreationTimestamp="2025-09-29 12:23:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-29 12:23:06.219177025 +0000 UTC m=+7.155443714" watchObservedRunningTime="2025-09-29 12:23:06.219365342 +0000 UTC m=+7.155632031"
Sep 29 12:23:09 dockerenv-230733 kubelet[1534]: I0929 12:23:09.619219 1534 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 29 12:23:09 dockerenv-230733 kubelet[1534]: I0929 12:23:09.620217 1534 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
==> storage-provisioner [c84c28f31a71ec98fac1a792f3163c0f3fe59a78d4b5383106a7da0a115fa313] <==
I0929 12:23:05.265569 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-230733 -n dockerenv-230733
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-230733 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-n625v
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-230733 describe pod coredns-66bc5c9577-n625v
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-230733 describe pod coredns-66bc5c9577-n625v: exit status 1 (68.144721ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-n625v" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-230733 describe pod coredns-66bc5c9577-n625v: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-230733" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-230733
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-230733: (2.31259157s)
--- FAIL: TestDockerEnvContainerd (43.33s)