=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-288409 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-288409 --driver=docker --container-runtime=containerd: (18.815794674s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-288409"
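(Context for the commands below: `docker-env --ssh-host --ssh-add` prints shell exports, a DOCKER_HOST pointing at the node's published SSH port plus SSH_AUTH_SOCK/SSH_AGENT_PID for an agent holding the node's key, which the test captures and replays in the following steps. A minimal sketch of the equivalent interactive usage, assuming the same profile name; the eval pattern is standard minikube practice rather than something this log runs verbatim:

    # Point the host docker CLI at the daemon inside the dockerenv-288409 node over SSH.
    $ eval "$(out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-288409)"
    $ docker version    # now answered by the node's daemon via ssh://docker@127.0.0.1:<port>
)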
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXDK1ail/agent.38490" SSH_AGENT_PID="38491" DOCKER_HOST=ssh://docker@127.0.0.1:32773 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXDK1ail/agent.38490" SSH_AGENT_PID="38491" DOCKER_HOST=ssh://docker@127.0.0.1:32773 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXDK1ail/agent.38490" SSH_AGENT_PID="38491" DOCKER_HOST=ssh://docker@127.0.0.1:32773 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": exit status 1 (2.219809288s)
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:245: failed to build images, error: exit status 1, output:
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
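(The deprecation banner above is only a warning; the build itself dies with the legacy builder's opaque "Error response from daemon: exit status 1". A sketch of the retry that the warning itself suggests, dropping DOCKER_BUILDKIT=0 so BuildKit can report a more descriptive failure, reusing the ephemeral agent socket and SSH endpoint from this run:

    # Same build over the same SSH tunnel, but with BuildKit enabled.
    $ SSH_AUTH_SOCK=/tmp/ssh-XXXXXXDK1ail/agent.38490 \
      SSH_AGENT_PID=38491 \
      DOCKER_HOST=ssh://docker@127.0.0.1:32773 \
      docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env
)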
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXDK1ail/agent.38490" SSH_AGENT_PID="38491" DOCKER_HOST=ssh://docker@127.0.0.1:32773 docker image ls"
docker_test.go:255: failed to detect image 'local/minikube-dockerenv-containerd-test' in output of docker image ls
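(The detection step scans `docker image ls` output for the tag the build should have produced; since the build failed, nothing matches. A hand-run equivalent against the same daemon, using the standard docker CLI repository filter and --format template:

    # Empty output confirms the image was never built.
    $ DOCKER_HOST=ssh://docker@127.0.0.1:32773 \
      docker image ls local/minikube-dockerenv-containerd-test --format '{{.Repository}}:{{.Tag}}'
)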
panic.go:636: *** TestDockerEnvContainerd FAILED at 2025-09-26 22:34:43.177538522 +0000 UTC m=+347.432745586
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-288409
helpers_test.go:243: (dbg) docker inspect dockerenv-288409:
-- stdout --
[
{
"Id": "d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf",
"Created": "2025-09-26T22:34:15.150917676Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 35733,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-26T22:34:15.198218083Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf/hostname",
"HostsPath": "/var/lib/docker/containers/d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf/hosts",
"LogPath": "/var/lib/docker/containers/d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf/d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf-json.log",
"Name": "/dockerenv-288409",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"dockerenv-288409:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "dockerenv-288409",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 8388608000,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 16777216000,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "d75fefaf209eb8650388af8dc066e1afa27073d3e65c71ec3c8c8b2f934026cf",
"LowerDir": "/var/lib/docker/overlay2/42b73ec2b02d64e633f0aa4ea3ea8d4521eda0caad5e0d6cf1d0ceb694ecafbd-init/diff:/var/lib/docker/overlay2/9d3f38ae04ffa0ee7bbacc3f831d8e286eafea1eb3c677a38c62c87997e117c6/diff",
"MergedDir": "/var/lib/docker/overlay2/42b73ec2b02d64e633f0aa4ea3ea8d4521eda0caad5e0d6cf1d0ceb694ecafbd/merged",
"UpperDir": "/var/lib/docker/overlay2/42b73ec2b02d64e633f0aa4ea3ea8d4521eda0caad5e0d6cf1d0ceb694ecafbd/diff",
"WorkDir": "/var/lib/docker/overlay2/42b73ec2b02d64e633f0aa4ea3ea8d4521eda0caad5e0d6cf1d0ceb694ecafbd/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "dockerenv-288409",
"Source": "/var/lib/docker/volumes/dockerenv-288409/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "dockerenv-288409",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "dockerenv-288409",
"name.minikube.sigs.k8s.io": "dockerenv-288409",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "19811ee2a64eec698ad19b077572d8ab098ba7bb8e075ad6f868be747651b729",
"SandboxKey": "/var/run/docker/netns/19811ee2a64e",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32773"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32774"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32777"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32775"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32776"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"dockerenv-288409": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "ce:10:8e:c9:c5:29",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "16f31e99b148dc18f25a1db2e7522aeba5af47fdd515e5ecf5d9cfad28e458d3",
"EndpointID": "46ed42bc7f8e201913fafdee381707b10be10625d3f0a032abec4a1f852848b3",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"dockerenv-288409",
"d75fefaf209e"
]
}
}
}
}
]
-- /stdout --
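(Note how NetworkSettings.Ports in the inspect output maps the node's 22/tcp to 127.0.0.1:32773, the exact endpoint in the DOCKER_HOST=ssh://docker@127.0.0.1:32773 used by the failing commands above. minikube resolves that mapping with the same Go template visible in the cli_runner lines further down this log; as a standalone command:

    # Prints the host port Docker assigned to the node's SSH port (32773 in this run).
    $ docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' dockerenv-288409
)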
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-288409 -n dockerenv-288409
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-288409 logs -n 25
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ addons │ addons-048605 addons disable metrics-server --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable headlamp --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ ip │ addons-048605 ip │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable registry --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable registry-creds --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ ssh │ addons-048605 ssh cat /opt/local-path-provisioner/pvc-8d02d742-b1cb-40fd-8405-10d79a57af25_default_test-pvc/file1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ ssh │ addons-048605 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ ip │ addons-048605 ip │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable ingress-dns --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable ingress --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable amd-gpu-device-plugin --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable yakd --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ addons │ addons-048605 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:33 UTC │
│ stop │ -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:33 UTC │ 26 Sep 25 22:34 UTC │
│ addons │ enable dashboard -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
│ addons │ disable dashboard -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
│ addons │ disable gvisor -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
│ delete │ -p addons-048605 │ addons-048605 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
│ start │ -p dockerenv-288409 --driver=docker --container-runtime=containerd │ dockerenv-288409 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-288409 │ dockerenv-288409 │ jenkins │ v1.37.0 │ 26 Sep 25 22:34 UTC │ 26 Sep 25 22:34 UTC │
└────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/26 22:34:10
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0926 22:34:10.381355 35169 out.go:360] Setting OutFile to fd 1 ...
I0926 22:34:10.381453 35169 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0926 22:34:10.381456 35169 out.go:374] Setting ErrFile to fd 2...
I0926 22:34:10.381459 35169 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0926 22:34:10.381621 35169 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21642-9508/.minikube/bin
I0926 22:34:10.382073 35169 out.go:368] Setting JSON to false
I0926 22:34:10.382823 35169 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":985,"bootTime":1758925065,"procs":184,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1040-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0926 22:34:10.382889 35169 start.go:140] virtualization: kvm guest
I0926 22:34:10.384406 35169 out.go:179] * [dockerenv-288409] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0926 22:34:10.385561 35169 notify.go:220] Checking for updates...
I0926 22:34:10.385591 35169 out.go:179] - MINIKUBE_LOCATION=21642
I0926 22:34:10.386559 35169 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0926 22:34:10.387532 35169 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21642-9508/kubeconfig
I0926 22:34:10.388520 35169 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21642-9508/.minikube
I0926 22:34:10.389418 35169 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0926 22:34:10.390390 35169 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0926 22:34:10.391509 35169 driver.go:421] Setting default libvirt URI to qemu:///system
I0926 22:34:10.413591 35169 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0926 22:34:10.413683 35169 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0926 22:34:10.466041 35169 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-09-26 22:34:10.456858146 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1040-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v0.1.40] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0926 22:34:10.466131 35169 docker.go:318] overlay module found
I0926 22:34:10.467429 35169 out.go:179] * Using the docker driver based on user configuration
I0926 22:34:10.468372 35169 start.go:304] selected driver: docker
I0926 22:34:10.468376 35169 start.go:924] validating driver "docker" against <nil>
I0926 22:34:10.468385 35169 start.go:935] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0926 22:34:10.468464 35169 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0926 22:34:10.519985 35169 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-09-26 22:34:10.511440987 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1040-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v0.1.40] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0926 22:34:10.520137 35169 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0926 22:34:10.520621 35169 start_flags.go:410] Using suggested 8000MB memory alloc based on sys=32093MB, container=32093MB
I0926 22:34:10.520748 35169 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I0926 22:34:10.522068 35169 out.go:179] * Using Docker driver with root privileges
I0926 22:34:10.523102 35169 cni.go:84] Creating CNI manager for ""
I0926 22:34:10.523147 35169 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0926 22:34:10.523152 35169 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0926 22:34:10.523206 35169 start.go:348] cluster config:
{Name:dockerenv-288409 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-288409 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0926 22:34:10.524202 35169 out.go:179] * Starting "dockerenv-288409" primary control-plane node in "dockerenv-288409" cluster
I0926 22:34:10.525165 35169 cache.go:123] Beginning downloading kic base image for docker with containerd
I0926 22:34:10.526141 35169 out.go:179] * Pulling base image v0.0.48 ...
I0926 22:34:10.527133 35169 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0926 22:34:10.527161 35169 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21642-9508/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0926 22:34:10.527169 35169 cache.go:58] Caching tarball of preloaded images
I0926 22:34:10.527228 35169 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0926 22:34:10.527245 35169 preload.go:172] Found /home/jenkins/minikube-integration/21642-9508/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0926 22:34:10.527251 35169 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0926 22:34:10.527541 35169 profile.go:143] Saving config to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/config.json ...
I0926 22:34:10.527556 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/config.json: {Name:mk024090da8f422767b767fd68a3277a49031b2d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:10.546078 35169 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0926 22:34:10.546087 35169 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0926 22:34:10.546103 35169 cache.go:232] Successfully downloaded all kic artifacts
I0926 22:34:10.546135 35169 start.go:360] acquireMachinesLock for dockerenv-288409: {Name:mk0f5c5af672e42021152f6126e8db7977083766 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0926 22:34:10.546217 35169 start.go:364] duration metric: took 67.17µs to acquireMachinesLock for "dockerenv-288409"
I0926 22:34:10.546235 35169 start.go:93] Provisioning new machine with config: &{Name:dockerenv-288409 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-288409 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0926 22:34:10.546289 35169 start.go:125] createHost starting for "" (driver="docker")
I0926 22:34:10.547606 35169 out.go:252] * Creating docker container (CPUs=2, Memory=8000MB) ...
I0926 22:34:10.547827 35169 start.go:159] libmachine.API.Create for "dockerenv-288409" (driver="docker")
I0926 22:34:10.547847 35169 client.go:168] LocalClient.Create starting
I0926 22:34:10.547893 35169 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem
I0926 22:34:10.547918 35169 main.go:141] libmachine: Decoding PEM data...
I0926 22:34:10.547930 35169 main.go:141] libmachine: Parsing certificate...
I0926 22:34:10.547972 35169 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21642-9508/.minikube/certs/cert.pem
I0926 22:34:10.547985 35169 main.go:141] libmachine: Decoding PEM data...
I0926 22:34:10.547991 35169 main.go:141] libmachine: Parsing certificate...
I0926 22:34:10.548298 35169 cli_runner.go:164] Run: docker network inspect dockerenv-288409 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0926 22:34:10.563692 35169 cli_runner.go:211] docker network inspect dockerenv-288409 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0926 22:34:10.563745 35169 network_create.go:284] running [docker network inspect dockerenv-288409] to gather additional debugging logs...
I0926 22:34:10.563774 35169 cli_runner.go:164] Run: docker network inspect dockerenv-288409
W0926 22:34:10.578391 35169 cli_runner.go:211] docker network inspect dockerenv-288409 returned with exit code 1
I0926 22:34:10.578410 35169 network_create.go:287] error running [docker network inspect dockerenv-288409]: docker network inspect dockerenv-288409: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-288409 not found
I0926 22:34:10.578420 35169 network_create.go:289] output of [docker network inspect dockerenv-288409]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-288409 not found
** /stderr **
I0926 22:34:10.578519 35169 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0926 22:34:10.594175 35169 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001c121b0}
I0926 22:34:10.594210 35169 network_create.go:124] attempt to create docker network dockerenv-288409 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0926 22:34:10.594245 35169 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-288409 dockerenv-288409
I0926 22:34:10.645546 35169 network_create.go:108] docker network dockerenv-288409 192.168.49.0/24 created
I0926 22:34:10.645567 35169 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-288409" container
I0926 22:34:10.645624 35169 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0926 22:34:10.660667 35169 cli_runner.go:164] Run: docker volume create dockerenv-288409 --label name.minikube.sigs.k8s.io=dockerenv-288409 --label created_by.minikube.sigs.k8s.io=true
I0926 22:34:10.676393 35169 oci.go:103] Successfully created a docker volume dockerenv-288409
I0926 22:34:10.676477 35169 cli_runner.go:164] Run: docker run --rm --name dockerenv-288409-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-288409 --entrypoint /usr/bin/test -v dockerenv-288409:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0926 22:34:11.025597 35169 oci.go:107] Successfully prepared a docker volume dockerenv-288409
I0926 22:34:11.025642 35169 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0926 22:34:11.025662 35169 kic.go:194] Starting extracting preloaded images to volume ...
I0926 22:34:11.025725 35169 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21642-9508/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-288409:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0926 22:34:15.086427 35169 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21642-9508/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-288409:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.060664671s)
I0926 22:34:15.086446 35169 kic.go:203] duration metric: took 4.060781676s to extract preloaded images to volume ...
W0926 22:34:15.086557 35169 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0926 22:34:15.086584 35169 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0926 22:34:15.086615 35169 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0926 22:34:15.136425 35169 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-288409 --name dockerenv-288409 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-288409 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-288409 --network dockerenv-288409 --ip 192.168.49.2 --volume dockerenv-288409:/var --security-opt apparmor=unconfined --memory=8000mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
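(Each --publish=127.0.0.1::<port> in the docker run above requests an ephemeral host port, which is why HostConfig.PortBindings in the earlier inspect output shows empty HostPort values while NetworkSettings.Ports carries the assigned ones. `docker port`, a standard CLI command not invoked in this log, shows the final assignments at a glance:

    # e.g. 22/tcp -> 127.0.0.1:32773, 8443/tcp -> 127.0.0.1:32776
    $ docker port dockerenv-288409
)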
I0926 22:34:15.385382 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Running}}
I0926 22:34:15.403671 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:15.420640 35169 cli_runner.go:164] Run: docker exec dockerenv-288409 stat /var/lib/dpkg/alternatives/iptables
I0926 22:34:15.465048 35169 oci.go:144] the created container "dockerenv-288409" has a running status.
I0926 22:34:15.465069 35169 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa...
I0926 22:34:15.548091 35169 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0926 22:34:15.577115 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:15.596249 35169 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0926 22:34:15.596294 35169 kic_runner.go:114] Args: [docker exec --privileged dockerenv-288409 chown docker:docker /home/docker/.ssh/authorized_keys]
I0926 22:34:15.643090 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:15.663525 35169 machine.go:93] provisionDockerMachine start ...
I0926 22:34:15.663610 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:15.683806 35169 main.go:141] libmachine: Using SSH client type: native
I0926 22:34:15.684150 35169 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I0926 22:34:15.684160 35169 main.go:141] libmachine: About to run SSH command:
hostname
I0926 22:34:15.822272 35169 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-288409
I0926 22:34:15.822296 35169 ubuntu.go:182] provisioning hostname "dockerenv-288409"
I0926 22:34:15.822349 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:15.840262 35169 main.go:141] libmachine: Using SSH client type: native
I0926 22:34:15.840512 35169 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I0926 22:34:15.840528 35169 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-288409 && echo "dockerenv-288409" | sudo tee /etc/hostname
I0926 22:34:15.983962 35169 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-288409
I0926 22:34:15.984030 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.000672 35169 main.go:141] libmachine: Using SSH client type: native
I0926 22:34:16.000925 35169 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I0926 22:34:16.000951 35169 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-288409' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-288409/g' /etc/hosts;
else
echo '127.0.1.1 dockerenv-288409' | sudo tee -a /etc/hosts;
fi
fi
I0926 22:34:16.132808 35169 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0926 22:34:16.132826 35169 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21642-9508/.minikube CaCertPath:/home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21642-9508/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21642-9508/.minikube}
I0926 22:34:16.132842 35169 ubuntu.go:190] setting up certificates
I0926 22:34:16.132853 35169 provision.go:84] configureAuth start
I0926 22:34:16.132908 35169 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-288409
I0926 22:34:16.149427 35169 provision.go:143] copyHostCerts
I0926 22:34:16.149468 35169 exec_runner.go:144] found /home/jenkins/minikube-integration/21642-9508/.minikube/ca.pem, removing ...
I0926 22:34:16.149486 35169 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21642-9508/.minikube/ca.pem
I0926 22:34:16.149544 35169 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21642-9508/.minikube/ca.pem (1078 bytes)
I0926 22:34:16.149633 35169 exec_runner.go:144] found /home/jenkins/minikube-integration/21642-9508/.minikube/cert.pem, removing ...
I0926 22:34:16.149636 35169 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21642-9508/.minikube/cert.pem
I0926 22:34:16.149660 35169 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21642-9508/.minikube/cert.pem (1123 bytes)
I0926 22:34:16.149720 35169 exec_runner.go:144] found /home/jenkins/minikube-integration/21642-9508/.minikube/key.pem, removing ...
I0926 22:34:16.149723 35169 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21642-9508/.minikube/key.pem
I0926 22:34:16.149744 35169 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21642-9508/.minikube/key.pem (1679 bytes)
I0926 22:34:16.149829 35169 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21642-9508/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca-key.pem org=jenkins.dockerenv-288409 san=[127.0.0.1 192.168.49.2 dockerenv-288409 localhost minikube]
I0926 22:34:16.309073 35169 provision.go:177] copyRemoteCerts
I0926 22:34:16.309118 35169 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0926 22:34:16.309160 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.325536 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:16.420344 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0926 22:34:16.444466 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0926 22:34:16.466684 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0926 22:34:16.489048 35169 provision.go:87] duration metric: took 356.183417ms to configureAuth
I0926 22:34:16.489066 35169 ubuntu.go:206] setting minikube options for container-runtime
I0926 22:34:16.489220 35169 config.go:182] Loaded profile config "dockerenv-288409": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0926 22:34:16.489226 35169 machine.go:96] duration metric: took 825.689297ms to provisionDockerMachine
I0926 22:34:16.489232 35169 client.go:171] duration metric: took 5.941381771s to LocalClient.Create
I0926 22:34:16.489252 35169 start.go:167] duration metric: took 5.941425787s to libmachine.API.Create "dockerenv-288409"
I0926 22:34:16.489261 35169 start.go:293] postStartSetup for "dockerenv-288409" (driver="docker")
I0926 22:34:16.489268 35169 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0926 22:34:16.489306 35169 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0926 22:34:16.489346 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.506112 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:16.601827 35169 ssh_runner.go:195] Run: cat /etc/os-release
I0926 22:34:16.604817 35169 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0926 22:34:16.604834 35169 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0926 22:34:16.604840 35169 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0926 22:34:16.604844 35169 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0926 22:34:16.604851 35169 filesync.go:126] Scanning /home/jenkins/minikube-integration/21642-9508/.minikube/addons for local assets ...
I0926 22:34:16.604892 35169 filesync.go:126] Scanning /home/jenkins/minikube-integration/21642-9508/.minikube/files for local assets ...
I0926 22:34:16.604907 35169 start.go:296] duration metric: took 115.641809ms for postStartSetup
I0926 22:34:16.605157 35169 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-288409
I0926 22:34:16.621660 35169 profile.go:143] Saving config to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/config.json ...
I0926 22:34:16.621881 35169 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0926 22:34:16.621908 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.637585 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:16.728035 35169 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0926 22:34:16.732021 35169 start.go:128] duration metric: took 6.185721821s to createHost
I0926 22:34:16.732035 35169 start.go:83] releasing machines lock for "dockerenv-288409", held for 6.185810771s
I0926 22:34:16.732096 35169 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-288409
I0926 22:34:16.748428 35169 ssh_runner.go:195] Run: cat /version.json
I0926 22:34:16.748460 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.748557 35169 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0926 22:34:16.748603 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:16.765284 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:16.765596 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:16.854030 35169 ssh_runner.go:195] Run: systemctl --version
I0926 22:34:16.935140 35169 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0926 22:34:16.939479 35169 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0926 22:34:16.965163 35169 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0926 22:34:16.965213 35169 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0926 22:34:16.989332 35169 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0926 22:34:16.989346 35169 start.go:495] detecting cgroup driver to use...
I0926 22:34:16.989373 35169 detect.go:190] detected "systemd" cgroup driver on host os
I0926 22:34:16.989402 35169 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0926 22:34:17.000366 35169 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0926 22:34:17.010390 35169 docker.go:218] disabling cri-docker service (if available) ...
I0926 22:34:17.010448 35169 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0926 22:34:17.022466 35169 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0926 22:34:17.035103 35169 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0926 22:34:17.099002 35169 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0926 22:34:17.166920 35169 docker.go:234] disabling docker service ...
I0926 22:34:17.166961 35169 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0926 22:34:17.182708 35169 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0926 22:34:17.193160 35169 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0926 22:34:17.258825 35169 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0926 22:34:17.320985 35169 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0926 22:34:17.331339 35169 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0926 22:34:17.346432 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0926 22:34:17.356792 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0926 22:34:17.365915 35169 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0926 22:34:17.365961 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0926 22:34:17.374851 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0926 22:34:17.384372 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0926 22:34:17.393131 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0926 22:34:17.402170 35169 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0926 22:34:17.410250 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0926 22:34:17.419045 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0926 22:34:17.427649 35169 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0926 22:34:17.436490 35169 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0926 22:34:17.444277 35169 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0926 22:34:17.452042 35169 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0926 22:34:17.510253 35169 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0926 22:34:17.607084 35169 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0926 22:34:17.607141 35169 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0926 22:34:17.610614 35169 start.go:563] Will wait 60s for crictl version
I0926 22:34:17.610649 35169 ssh_runner.go:195] Run: which crictl
I0926 22:34:17.613785 35169 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0926 22:34:17.644631 35169 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0926 22:34:17.644682 35169 ssh_runner.go:195] Run: containerd --version
I0926 22:34:17.665189 35169 ssh_runner.go:195] Run: containerd --version
I0926 22:34:17.688770 35169 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0926 22:34:17.689681 35169 cli_runner.go:164] Run: docker network inspect dockerenv-288409 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0926 22:34:17.705580 35169 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0926 22:34:17.708961 35169 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0926 22:34:17.719670 35169 kubeadm.go:883] updating cluster {Name:dockerenv-288409 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-288409 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0926 22:34:17.719762 35169 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0926 22:34:17.719811 35169 ssh_runner.go:195] Run: sudo crictl images --output json
I0926 22:34:17.750143 35169 containerd.go:627] all images are preloaded for containerd runtime.
I0926 22:34:17.750152 35169 containerd.go:534] Images already preloaded, skipping extraction
I0926 22:34:17.750190 35169 ssh_runner.go:195] Run: sudo crictl images --output json
I0926 22:34:17.781886 35169 containerd.go:627] all images are preloaded for containerd runtime.
I0926 22:34:17.781898 35169 cache_images.go:85] Images are preloaded, skipping loading
I0926 22:34:17.781905 35169 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0926 22:34:17.781996 35169 kubeadm.go:946] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-288409 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:dockerenv-288409 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
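(Note on the unit drop-in above: the doubled ExecStart= is the standard systemd idiom. The first, empty ExecStart= clears the command inherited from the base kubelet.service unit, so the drop-in's full command line replaces it instead of being appended to it.)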
I0926 22:34:17.782039 35169 ssh_runner.go:195] Run: sudo crictl info
I0926 22:34:17.813696 35169 cni.go:84] Creating CNI manager for ""
I0926 22:34:17.813703 35169 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0926 22:34:17.813714 35169 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0926 22:34:17.813732 35169 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-288409 NodeName:dockerenv-288409 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0926 22:34:17.813874 35169 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "dockerenv-288409"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.49.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
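(Note: a rendered config like the one above can be sanity-checked on the node before kubeadm consumes it. A minimal sketch, assuming the kubeadm binary staged under /var/lib/minikube/binaries and the path the file is copied to later in this log:

sudo /var/lib/minikube/binaries/v1.34.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run

--dry-run makes kubeadm print the manifests and objects it would create without mutating the host.)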
I0926 22:34:17.813929 35169 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0926 22:34:17.822519 35169 binaries.go:44] Found k8s binaries, skipping transfer
I0926 22:34:17.822566 35169 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0926 22:34:17.830879 35169 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0926 22:34:17.847258 35169 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0926 22:34:17.865561 35169 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2228 bytes)
I0926 22:34:17.881378 35169 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0926 22:34:17.884470 35169 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0926 22:34:17.894347 35169 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0926 22:34:17.954994 35169 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0926 22:34:17.983681 35169 certs.go:69] Setting up /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409 for IP: 192.168.49.2
I0926 22:34:17.983692 35169 certs.go:195] generating shared ca certs ...
I0926 22:34:17.983707 35169 certs.go:227] acquiring lock for ca certs: {Name:mk080975279b3a5ea38bd0bf3f7fdebf08ad146a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:17.983848 35169 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21642-9508/.minikube/ca.key
I0926 22:34:17.983882 35169 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21642-9508/.minikube/proxy-client-ca.key
I0926 22:34:17.983889 35169 certs.go:257] generating profile certs ...
I0926 22:34:17.983934 35169 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.key
I0926 22:34:17.983942 35169 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.crt with IP's: []
I0926 22:34:18.404438 35169 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.crt ...
I0926 22:34:18.404454 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.crt: {Name:mk96817594255b6552424600fe11b22efb8abb1f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:18.404612 35169 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.key ...
I0926 22:34:18.404618 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/client.key: {Name:mkd9e78dd738e2fe90f03958687f344d9e0c71e1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:18.404690 35169 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key.06e91cc2
I0926 22:34:18.404699 35169 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt.06e91cc2 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0926 22:34:19.206187 35169 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt.06e91cc2 ...
I0926 22:34:19.206203 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt.06e91cc2: {Name:mk821940e00eae1caec9748b6e62e7ef326488fe Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:19.206360 35169 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key.06e91cc2 ...
I0926 22:34:19.206368 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key.06e91cc2: {Name:mk00d5afc9d5a0c0ba19b4b83e46c5601ad33cf1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:19.206432 35169 certs.go:382] copying /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt.06e91cc2 -> /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt
I0926 22:34:19.206500 35169 certs.go:386] copying /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key.06e91cc2 -> /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key
I0926 22:34:19.206550 35169 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.key
I0926 22:34:19.206560 35169 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.crt with IP's: []
I0926 22:34:19.597689 35169 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.crt ...
I0926 22:34:19.597707 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.crt: {Name:mk7ba0398406f0eee427cfbeeaffc0d8a3510864 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:19.597883 35169 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.key ...
I0926 22:34:19.597892 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.key: {Name:mk44e68c614ad945afb9fe3b9d18eea645d35c5d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:19.598090 35169 certs.go:484] found cert: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca-key.pem (1675 bytes)
I0926 22:34:19.598124 35169 certs.go:484] found cert: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/ca.pem (1078 bytes)
I0926 22:34:19.598145 35169 certs.go:484] found cert: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/cert.pem (1123 bytes)
I0926 22:34:19.598161 35169 certs.go:484] found cert: /home/jenkins/minikube-integration/21642-9508/.minikube/certs/key.pem (1679 bytes)
I0926 22:34:19.598776 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0926 22:34:19.622683 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0926 22:34:19.646906 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0926 22:34:19.669833 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0926 22:34:19.691549 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0926 22:34:19.713389 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0926 22:34:19.734728 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0926 22:34:19.756206 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/profiles/dockerenv-288409/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0926 22:34:19.778215 35169 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21642-9508/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0926 22:34:19.802241 35169 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0926 22:34:19.818253 35169 ssh_runner.go:195] Run: openssl version
I0926 22:34:19.823088 35169 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0926 22:34:19.833291 35169 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0926 22:34:19.836397 35169 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 26 22:29 /usr/share/ca-certificates/minikubeCA.pem
I0926 22:34:19.836438 35169 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0926 22:34:19.842484 35169 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
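(Note: the b5213941.0 link name follows OpenSSL's subject-hash convention; it is the hash printed by the openssl x509 -hash call above, suffixed with .0. To confirm the pairing by hand, using the same cert path:

openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem    # prints b5213941

OpenSSL locates a CA during verification by hashing the subject name and probing /etc/ssl/certs/<hash>.N, which is why the symlink is created.)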
I0926 22:34:19.850890 35169 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0926 22:34:19.853920 35169 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0926 22:34:19.853961 35169 kubeadm.go:400] StartCluster: {Name:dockerenv-288409 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:dockerenv-288409 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0926 22:34:19.854019 35169 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0926 22:34:19.854062 35169 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0926 22:34:19.885486 35169 cri.go:89] found id: ""
I0926 22:34:19.885525 35169 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0926 22:34:19.893655 35169 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0926 22:34:19.901780 35169 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0926 22:34:19.901810 35169 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0926 22:34:19.909675 35169 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0926 22:34:19.909686 35169 kubeadm.go:157] found existing configuration files:
I0926 22:34:19.909718 35169 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0926 22:34:19.917520 35169 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0926 22:34:19.917554 35169 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0926 22:34:19.925063 35169 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0926 22:34:19.932686 35169 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0926 22:34:19.932717 35169 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0926 22:34:19.940335 35169 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0926 22:34:19.948170 35169 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0926 22:34:19.948206 35169 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0926 22:34:19.955819 35169 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0926 22:34:19.963428 35169 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0926 22:34:19.963457 35169 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0926 22:34:19.970984 35169 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0926 22:34:20.020731 35169 kubeadm.go:318] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1040-gcp\n", err: exit status 1
I0926 22:34:20.069416 35169 kubeadm.go:318] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0926 22:34:28.043398 35169 kubeadm.go:318] [init] Using Kubernetes version: v1.34.0
I0926 22:34:28.043472 35169 kubeadm.go:318] [preflight] Running pre-flight checks
I0926 22:34:28.043552 35169 kubeadm.go:318] [preflight] The system verification failed. Printing the output from the verification:
I0926 22:34:28.043615 35169 kubeadm.go:318] KERNEL_VERSION: 6.8.0-1040-gcp
I0926 22:34:28.043646 35169 kubeadm.go:318] OS: Linux
I0926 22:34:28.043680 35169 kubeadm.go:318] CGROUPS_CPU: enabled
I0926 22:34:28.043727 35169 kubeadm.go:318] CGROUPS_CPUSET: enabled
I0926 22:34:28.043797 35169 kubeadm.go:318] CGROUPS_DEVICES: enabled
I0926 22:34:28.043837 35169 kubeadm.go:318] CGROUPS_FREEZER: enabled
I0926 22:34:28.043879 35169 kubeadm.go:318] CGROUPS_MEMORY: enabled
I0926 22:34:28.043916 35169 kubeadm.go:318] CGROUPS_PIDS: enabled
I0926 22:34:28.043954 35169 kubeadm.go:318] CGROUPS_HUGETLB: enabled
I0926 22:34:28.043992 35169 kubeadm.go:318] CGROUPS_IO: enabled
I0926 22:34:28.044056 35169 kubeadm.go:318] [preflight] Pulling images required for setting up a Kubernetes cluster
I0926 22:34:28.044131 35169 kubeadm.go:318] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0926 22:34:28.044216 35169 kubeadm.go:318] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0926 22:34:28.044269 35169 kubeadm.go:318] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0926 22:34:28.045983 35169 out.go:252] - Generating certificates and keys ...
I0926 22:34:28.046040 35169 kubeadm.go:318] [certs] Using existing ca certificate authority
I0926 22:34:28.046101 35169 kubeadm.go:318] [certs] Using existing apiserver certificate and key on disk
I0926 22:34:28.046154 35169 kubeadm.go:318] [certs] Generating "apiserver-kubelet-client" certificate and key
I0926 22:34:28.046210 35169 kubeadm.go:318] [certs] Generating "front-proxy-ca" certificate and key
I0926 22:34:28.046269 35169 kubeadm.go:318] [certs] Generating "front-proxy-client" certificate and key
I0926 22:34:28.046310 35169 kubeadm.go:318] [certs] Generating "etcd/ca" certificate and key
I0926 22:34:28.046362 35169 kubeadm.go:318] [certs] Generating "etcd/server" certificate and key
I0926 22:34:28.046461 35169 kubeadm.go:318] [certs] etcd/server serving cert is signed for DNS names [dockerenv-288409 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0926 22:34:28.046506 35169 kubeadm.go:318] [certs] Generating "etcd/peer" certificate and key
I0926 22:34:28.046625 35169 kubeadm.go:318] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-288409 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0926 22:34:28.046679 35169 kubeadm.go:318] [certs] Generating "etcd/healthcheck-client" certificate and key
I0926 22:34:28.046734 35169 kubeadm.go:318] [certs] Generating "apiserver-etcd-client" certificate and key
I0926 22:34:28.046799 35169 kubeadm.go:318] [certs] Generating "sa" key and public key
I0926 22:34:28.046854 35169 kubeadm.go:318] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0926 22:34:28.046908 35169 kubeadm.go:318] [kubeconfig] Writing "admin.conf" kubeconfig file
I0926 22:34:28.046978 35169 kubeadm.go:318] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0926 22:34:28.047025 35169 kubeadm.go:318] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0926 22:34:28.047079 35169 kubeadm.go:318] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0926 22:34:28.047127 35169 kubeadm.go:318] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0926 22:34:28.047193 35169 kubeadm.go:318] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0926 22:34:28.047252 35169 kubeadm.go:318] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0926 22:34:28.048345 35169 out.go:252] - Booting up control plane ...
I0926 22:34:28.048419 35169 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0926 22:34:28.048480 35169 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0926 22:34:28.048558 35169 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0926 22:34:28.048694 35169 kubeadm.go:318] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0926 22:34:28.048824 35169 kubeadm.go:318] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0926 22:34:28.048958 35169 kubeadm.go:318] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0926 22:34:28.049082 35169 kubeadm.go:318] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0926 22:34:28.049148 35169 kubeadm.go:318] [kubelet-start] Starting the kubelet
I0926 22:34:28.049271 35169 kubeadm.go:318] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0926 22:34:28.049359 35169 kubeadm.go:318] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0926 22:34:28.049414 35169 kubeadm.go:318] [kubelet-check] The kubelet is healthy after 501.788611ms
I0926 22:34:28.049492 35169 kubeadm.go:318] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0926 22:34:28.049557 35169 kubeadm.go:318] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0926 22:34:28.049643 35169 kubeadm.go:318] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0926 22:34:28.049705 35169 kubeadm.go:318] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0926 22:34:28.049803 35169 kubeadm.go:318] [control-plane-check] kube-controller-manager is healthy after 1.010772138s
I0926 22:34:28.049869 35169 kubeadm.go:318] [control-plane-check] kube-scheduler is healthy after 2.427586132s
I0926 22:34:28.049963 35169 kubeadm.go:318] [control-plane-check] kube-apiserver is healthy after 3.501847601s
I0926 22:34:28.050117 35169 kubeadm.go:318] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0926 22:34:28.050300 35169 kubeadm.go:318] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0926 22:34:28.050375 35169 kubeadm.go:318] [upload-certs] Skipping phase. Please see --upload-certs
I0926 22:34:28.050537 35169 kubeadm.go:318] [mark-control-plane] Marking the node dockerenv-288409 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0926 22:34:28.050593 35169 kubeadm.go:318] [bootstrap-token] Using token: g29eup.zvx9ymjpjpfj88e5
I0926 22:34:28.051532 35169 out.go:252] - Configuring RBAC rules ...
I0926 22:34:28.051637 35169 kubeadm.go:318] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0926 22:34:28.051704 35169 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0926 22:34:28.051846 35169 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0926 22:34:28.051955 35169 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0926 22:34:28.052057 35169 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0926 22:34:28.052139 35169 kubeadm.go:318] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0926 22:34:28.052284 35169 kubeadm.go:318] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0926 22:34:28.052350 35169 kubeadm.go:318] [addons] Applied essential addon: CoreDNS
I0926 22:34:28.052394 35169 kubeadm.go:318] [addons] Applied essential addon: kube-proxy
I0926 22:34:28.052399 35169 kubeadm.go:318]
I0926 22:34:28.052444 35169 kubeadm.go:318] Your Kubernetes control-plane has initialized successfully!
I0926 22:34:28.052446 35169 kubeadm.go:318]
I0926 22:34:28.052544 35169 kubeadm.go:318] To start using your cluster, you need to run the following as a regular user:
I0926 22:34:28.052549 35169 kubeadm.go:318]
I0926 22:34:28.052584 35169 kubeadm.go:318] mkdir -p $HOME/.kube
I0926 22:34:28.052667 35169 kubeadm.go:318] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0926 22:34:28.052741 35169 kubeadm.go:318] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0926 22:34:28.052745 35169 kubeadm.go:318]
I0926 22:34:28.052831 35169 kubeadm.go:318] Alternatively, if you are the root user, you can run:
I0926 22:34:28.052837 35169 kubeadm.go:318]
I0926 22:34:28.052895 35169 kubeadm.go:318] export KUBECONFIG=/etc/kubernetes/admin.conf
I0926 22:34:28.052897 35169 kubeadm.go:318]
I0926 22:34:28.052952 35169 kubeadm.go:318] You should now deploy a pod network to the cluster.
I0926 22:34:28.053033 35169 kubeadm.go:318] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0926 22:34:28.053127 35169 kubeadm.go:318] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0926 22:34:28.053131 35169 kubeadm.go:318]
I0926 22:34:28.053241 35169 kubeadm.go:318] You can now join any number of control-plane nodes by copying certificate authorities
I0926 22:34:28.053309 35169 kubeadm.go:318] and service account keys on each node and then running the following as root:
I0926 22:34:28.053311 35169 kubeadm.go:318]
I0926 22:34:28.053385 35169 kubeadm.go:318] kubeadm join control-plane.minikube.internal:8443 --token g29eup.zvx9ymjpjpfj88e5 \
I0926 22:34:28.053486 35169 kubeadm.go:318] --discovery-token-ca-cert-hash sha256:1dbeb716d602e0941682b86f7d46c5a496a37728672c82fc41605cb6bf1292a7 \
I0926 22:34:28.053515 35169 kubeadm.go:318] --control-plane
I0926 22:34:28.053519 35169 kubeadm.go:318]
I0926 22:34:28.053633 35169 kubeadm.go:318] Then you can join any number of worker nodes by running the following on each as root:
I0926 22:34:28.053644 35169 kubeadm.go:318]
I0926 22:34:28.053733 35169 kubeadm.go:318] kubeadm join control-plane.minikube.internal:8443 --token g29eup.zvx9ymjpjpfj88e5 \
I0926 22:34:28.053850 35169 kubeadm.go:318] --discovery-token-ca-cert-hash sha256:1dbeb716d602e0941682b86f7d46c5a496a37728672c82fc41605cb6bf1292a7
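(Note: the --discovery-token-ca-cert-hash in the join commands above is the SHA-256 of the cluster CA's public key, which lets joining nodes pin the control plane's identity so the token alone cannot be used to join them to an impostor. A sketch for recomputing it on this node, assuming the RSA CA at the certificatesDir configured earlier:

openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //')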
I0926 22:34:28.053860 35169 cni.go:84] Creating CNI manager for ""
I0926 22:34:28.053868 35169 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0926 22:34:28.055023 35169 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0926 22:34:28.055842 35169 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0926 22:34:28.059847 35169 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0926 22:34:28.059856 35169 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0926 22:34:28.077619 35169 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0926 22:34:28.270556 35169 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0926 22:34:28.270633 35169 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0926 22:34:28.270664 35169 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-288409 minikube.k8s.io/updated_at=2025_09_26T22_34_28_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=528ef52dd808f925e881f79a2a823817d9197d47 minikube.k8s.io/name=dockerenv-288409 minikube.k8s.io/primary=true
I0926 22:34:28.278127 35169 ops.go:34] apiserver oom_adj: -16
I0926 22:34:28.370521 35169 kubeadm.go:1113] duration metric: took 99.958235ms to wait for elevateKubeSystemPrivileges
I0926 22:34:28.370554 35169 kubeadm.go:402] duration metric: took 8.516595515s to StartCluster
I0926 22:34:28.370576 35169 settings.go:142] acquiring lock: {Name:mke935858c08b57824075e52fb45232e2555a3b0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:28.370633 35169 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21642-9508/kubeconfig
I0926 22:34:28.371238 35169 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21642-9508/kubeconfig: {Name:mka72bf89c026ab3e09a0062db4219353845dcad Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0926 22:34:28.371430 35169 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0926 22:34:28.371451 35169 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0926 22:34:28.371472 35169 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0926 22:34:28.371534 35169 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-288409"
I0926 22:34:28.371544 35169 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-288409"
I0926 22:34:28.371577 35169 host.go:66] Checking if "dockerenv-288409" exists ...
I0926 22:34:28.371574 35169 addons.go:69] Setting default-storageclass=true in profile "dockerenv-288409"
I0926 22:34:28.371595 35169 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-288409"
I0926 22:34:28.371656 35169 config.go:182] Loaded profile config "dockerenv-288409": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0926 22:34:28.371943 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:28.371992 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:28.372719 35169 out.go:179] * Verifying Kubernetes components...
I0926 22:34:28.373679 35169 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0926 22:34:28.394615 35169 addons.go:238] Setting addon default-storageclass=true in "dockerenv-288409"
I0926 22:34:28.394648 35169 host.go:66] Checking if "dockerenv-288409" exists ...
I0926 22:34:28.395179 35169 cli_runner.go:164] Run: docker container inspect dockerenv-288409 --format={{.State.Status}}
I0926 22:34:28.395856 35169 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0926 22:34:28.396970 35169 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0926 22:34:28.396978 35169 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0926 22:34:28.397014 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:28.418421 35169 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0926 22:34:28.418434 35169 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0926 22:34:28.418485 35169 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-288409
I0926 22:34:28.422142 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:28.440920 35169 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21642-9508/.minikube/machines/dockerenv-288409/id_rsa Username:docker}
I0926 22:34:28.454189 35169 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
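(Note: the sed pipeline above rewrites CoreDNS's Corefile inside the coredns ConfigMap: it adds a log directive after errors and inserts a hosts block ahead of the forward . /etc/resolv.conf line. The injected fragment is:

hosts {
   192.168.49.1 host.minikube.internal
   fallthrough
}

which is what makes host.minikube.internal resolvable from inside pods; the "host record injected" line below confirms it took effect.)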
I0926 22:34:28.492084 35169 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0926 22:34:28.530224 35169 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0926 22:34:28.558565 35169 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0926 22:34:28.604180 35169 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0926 22:34:28.604906 35169 api_server.go:52] waiting for apiserver process to appear ...
I0926 22:34:28.604955 35169 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0926 22:34:28.780000 35169 api_server.go:72] duration metric: took 408.548638ms to wait for apiserver process to appear ...
I0926 22:34:28.780013 35169 api_server.go:88] waiting for apiserver healthz status ...
I0926 22:34:28.780031 35169 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0926 22:34:28.785435 35169 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0926 22:34:28.786277 35169 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0926 22:34:28.786298 35169 addons.go:514] duration metric: took 414.834334ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0926 22:34:28.786936 35169 api_server.go:141] control plane version: v1.34.0
I0926 22:34:28.786947 35169 api_server.go:131] duration metric: took 6.929906ms to wait for apiserver health ...
I0926 22:34:28.786953 35169 system_pods.go:43] waiting for kube-system pods to appear ...
I0926 22:34:28.789080 35169 system_pods.go:59] 5 kube-system pods found
I0926 22:34:28.789102 35169 system_pods.go:61] "etcd-dockerenv-288409" [2df10d92-0afe-4dc7-8e0a-8f0e1dfd2910] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0926 22:34:28.789109 35169 system_pods.go:61] "kube-apiserver-dockerenv-288409" [79739b95-a990-47d3-99c8-d5a8b7922428] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0926 22:34:28.789115 35169 system_pods.go:61] "kube-controller-manager-dockerenv-288409" [8665119f-0062-4e45-9b0a-f7e9c6254104] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0926 22:34:28.789124 35169 system_pods.go:61] "kube-scheduler-dockerenv-288409" [3bc6684a-9a86-4f64-b01b-8dba6e250edf] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0926 22:34:28.789130 35169 system_pods.go:61] "storage-provisioner" [829a3b28-be70-4704-9ff0-0f13173e9a69] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0926 22:34:28.789136 35169 system_pods.go:74] duration metric: took 2.178159ms to wait for pod list to return data ...
I0926 22:34:28.789148 35169 kubeadm.go:586] duration metric: took 417.699448ms to wait for: map[apiserver:true system_pods:true]
I0926 22:34:28.789160 35169 node_conditions.go:102] verifying NodePressure condition ...
I0926 22:34:28.790910 35169 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0926 22:34:28.790921 35169 node_conditions.go:123] node cpu capacity is 8
I0926 22:34:28.790930 35169 node_conditions.go:105] duration metric: took 1.767668ms to run NodePressure ...
I0926 22:34:28.790939 35169 start.go:241] waiting for startup goroutines ...
I0926 22:34:29.106891 35169 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-288409" context rescaled to 1 replicas
I0926 22:34:29.106921 35169 start.go:246] waiting for cluster config update ...
I0926 22:34:29.106934 35169 start.go:255] writing updated cluster config ...
I0926 22:34:29.107232 35169 ssh_runner.go:195] Run: rm -f paused
I0926 22:34:29.150275 35169 start.go:623] kubectl: 1.34.1, cluster: 1.34.0 (minor skew: 0)
I0926 22:34:29.151857 35169 out.go:179] * Done! kubectl is now configured to use "dockerenv-288409" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD
747fbf3b16772   6e38f40d628db   10 seconds ago   Running   storage-provisioner       0         c568bedf72619   storage-provisioner
b8e256beac427   409467f978b4a   10 seconds ago   Running   kindnet-cni               0         01618a7e9e0bf   kindnet-j7zqb
00c07918d85dc   df0860106674d   10 seconds ago   Running   kube-proxy                0         58f1e505db5ac   kube-proxy-b8w46
07f5581a290d4   46169d968e920   20 seconds ago   Running   kube-scheduler            0         b5acd76ea2e75   kube-scheduler-dockerenv-288409
a122dc94a3fdc   5f1f5298c888d   20 seconds ago   Running   etcd                      0         26fc76b5fa3d9   etcd-dockerenv-288409
6ab780ebce19e   a0af72f2ec6d6   20 seconds ago   Running   kube-controller-manager   0         64dd925d3b847   kube-controller-manager-dockerenv-288409
15bac67683448   90550c43ad2bc   20 seconds ago   Running   kube-apiserver            0         dba1bcd26b7e0   kube-apiserver-dockerenv-288409
==> containerd <==
Sep 26 22:34:23 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:23.499829346Z" level=info msg="StartContainer for \"15bac676834485c52f16bf244d0549ad3ef5ec24a71542f0a164b6e89a7a3f5b\" returns successfully"
Sep 26 22:34:23 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:23.499842185Z" level=info msg="StartContainer for \"07f5581a290d4cf0a51f67b7de1bcd59271ed696ab242fc2115709f2274c98a5\" returns successfully"
Sep 26 22:34:23 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:23.509667892Z" level=info msg="StartContainer for \"6ab780ebce19ebe083d69344e2b93b4c8fbf614e07cd2e0dc5f281ee27f39465\" returns successfully"
Sep 26 22:34:23 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:23.509764738Z" level=info msg="StartContainer for \"a122dc94a3fdc725c11720dd2aa12be971bdefa53aa2ab2d310cfef11b0accd5\" returns successfully"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.358547587Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-b8w46,Uid:a1ecac65-9974-4c9c-a3fa-2a23b59e0583,Namespace:kube-system,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.371722478Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-j7zqb,Uid:dbd80d7c-ac73-4ace-b92a-c92e83855505,Namespace:kube-system,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.420470242Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-b8w46,Uid:a1ecac65-9974-4c9c-a3fa-2a23b59e0583,Namespace:kube-system,Attempt:0,} returns sandbox id \"58f1e505db5ac5e5680b84f3a87296265431e3359d5a1e3d839dfb91f6295765\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.425418458Z" level=info msg="CreateContainer within sandbox \"58f1e505db5ac5e5680b84f3a87296265431e3359d5a1e3d839dfb91f6295765\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.434461484Z" level=info msg="CreateContainer within sandbox \"58f1e505db5ac5e5680b84f3a87296265431e3359d5a1e3d839dfb91f6295765\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"00c07918d85dc67ccd690abe73f0f796c2ba48ffb3bc18370c690feb73d26a6a\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.434956417Z" level=info msg="StartContainer for \"00c07918d85dc67ccd690abe73f0f796c2ba48ffb3bc18370c690feb73d26a6a\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.454839353Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-ks8sh,Uid:1a3a32b2-531c-4c7e-80f8-1fb4c90a7113,Namespace:kube-system,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.472150192Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-ks8sh,Uid:1a3a32b2-531c-4c7e-80f8-1fb4c90a7113,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\": failed to find network info for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.503053983Z" level=info msg="StartContainer for \"00c07918d85dc67ccd690abe73f0f796c2ba48ffb3bc18370c690feb73d26a6a\" returns successfully"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.707991339Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-j7zqb,Uid:dbd80d7c-ac73-4ace-b92a-c92e83855505,Namespace:kube-system,Attempt:0,} returns sandbox id \"01618a7e9e0bff3c2e3cfcc5324a7f4d594bd9eae2dc9eb24fad476daa34bf49\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.712339857Z" level=info msg="CreateContainer within sandbox \"01618a7e9e0bff3c2e3cfcc5324a7f4d594bd9eae2dc9eb24fad476daa34bf49\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.715872807Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:829a3b28-be70-4704-9ff0-0f13173e9a69,Namespace:kube-system,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.724323936Z" level=info msg="CreateContainer within sandbox \"01618a7e9e0bff3c2e3cfcc5324a7f4d594bd9eae2dc9eb24fad476daa34bf49\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"b8e256beac4272358658f856de9d4ee060aa68822cd69e36db42c39cae7f8143\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.724815585Z" level=info msg="StartContainer for \"b8e256beac4272358658f856de9d4ee060aa68822cd69e36db42c39cae7f8143\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.806439444Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:829a3b28-be70-4704-9ff0-0f13173e9a69,Namespace:kube-system,Attempt:0,} returns sandbox id \"c568bedf72619cc91850a3b263cfae3c18910b6726ebec3ac0332006959aa173\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.811777598Z" level=info msg="CreateContainer within sandbox \"c568bedf72619cc91850a3b263cfae3c18910b6726ebec3ac0332006959aa173\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.813877410Z" level=info msg="StartContainer for \"b8e256beac4272358658f856de9d4ee060aa68822cd69e36db42c39cae7f8143\" returns successfully"
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.820485651Z" level=info msg="CreateContainer within sandbox \"c568bedf72619cc91850a3b263cfae3c18910b6726ebec3ac0332006959aa173\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"747fbf3b16772930c7f5f29583387afea27865f28e72502aecbc606127778af9\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.820920515Z" level=info msg="StartContainer for \"747fbf3b16772930c7f5f29583387afea27865f28e72502aecbc606127778af9\""
Sep 26 22:34:33 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:33.871076376Z" level=info msg="StartContainer for \"747fbf3b16772930c7f5f29583387afea27865f28e72502aecbc606127778af9\" returns successfully"
Sep 26 22:34:37 dockerenv-288409 containerd[760]: time="2025-09-26T22:34:37.490535781Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
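(Note: the lone RunPodSandbox error at 22:34:33 above, "failed to find network info" for the coredns sandbox, is the usual transient seen before kindnet has written its CNI config; the 22:34:37 line is containerd noting that it is waiting for another component, here kindnet, to drop that config. A quick check that the config eventually lands, as a sketch using this run's profile name:

minikube -p dockerenv-288409 ssh -- ls /etc/cni/net.d)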
==> describe nodes <==
Name:               dockerenv-288409
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=dockerenv-288409
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=528ef52dd808f925e881f79a2a823817d9197d47
                    minikube.k8s.io/name=dockerenv-288409
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_09_26T22_34_28_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Fri, 26 Sep 2025 22:34:25 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  dockerenv-288409
  AcquireTime:     <unset>
  RenewTime:       Fri, 26 Sep 2025 22:34:37 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Fri, 26 Sep 2025 22:34:37 +0000   Fri, 26 Sep 2025 22:34:23 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Fri, 26 Sep 2025 22:34:37 +0000   Fri, 26 Sep 2025 22:34:23 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Fri, 26 Sep 2025 22:34:37 +0000   Fri, 26 Sep 2025 22:34:23 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Fri, 26 Sep 2025 22:34:37 +0000   Fri, 26 Sep 2025 22:34:25 +0000   KubeletReady                kubelet is posting ready status
Addresses:
  InternalIP:  192.168.49.2
  Hostname:    dockerenv-288409
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863452Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863452Ki
  pods:               110
System Info:
  Machine ID:                 507c4e7b5f934498b1d9e3008fbd9b6d
  System UUID:                f2a19196-8d22-41a2-9930-32d776aeedaa
  Boot ID:                    d6777c8b-c717-4851-a50e-a884fc659348
  Kernel Version:             6.8.0-1040-gcp
  OS Image:                   Ubuntu 22.04.5 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://1.7.27
  Kubelet Version:            v1.34.0
  Kube-Proxy Version:
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (8 in total)
  Namespace    Name                                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                       ------------  ----------  ---------------  -------------  ---
  kube-system  coredns-66bc5c9577-ks8sh                   100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     11s
  kube-system  etcd-dockerenv-288409                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         17s
  kube-system  kindnet-j7zqb                              100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      11s
  kube-system  kube-apiserver-dockerenv-288409            250m (3%)     0 (0%)      0 (0%)           0 (0%)         17s
  kube-system  kube-controller-manager-dockerenv-288409   200m (2%)     0 (0%)      0 (0%)           0 (0%)         17s
  kube-system  kube-proxy-b8w46                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         11s
  kube-system  kube-scheduler-dockerenv-288409            100m (1%)     0 (0%)      0 (0%)           0 (0%)         17s
  kube-system  storage-provisioner                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         16s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (0%)  220Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ----               ----             -------
  Normal  Starting                 10s                kube-proxy
  Normal  Starting                 22s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  22s (x8 over 22s)  kubelet          Node dockerenv-288409 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    22s (x8 over 22s)  kubelet          Node dockerenv-288409 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     22s (x7 over 22s)  kubelet          Node dockerenv-288409 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  22s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 17s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  17s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  17s                kubelet          Node dockerenv-288409 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    17s                kubelet          Node dockerenv-288409 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     17s                kubelet          Node dockerenv-288409 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           13s                node-controller  Node dockerenv-288409 event: Registered Node dockerenv-288409 in Controller
==> dmesg <==
[Sep26 22:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001877] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.000999] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.086010] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.387443] i8042: Warning: Keylock active
[ +0.011484] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.004689] platform eisa.0: EISA: Cannot allocate resource for mainboard
[ +0.000998] platform eisa.0: Cannot allocate resource for EISA slot 1
[ +0.001003] platform eisa.0: Cannot allocate resource for EISA slot 2
[ +0.000986] platform eisa.0: Cannot allocate resource for EISA slot 3
[ +0.001141] platform eisa.0: Cannot allocate resource for EISA slot 4
[ +0.000947] platform eisa.0: Cannot allocate resource for EISA slot 5
[ +0.001004] platform eisa.0: Cannot allocate resource for EISA slot 6
[ +0.001049] platform eisa.0: Cannot allocate resource for EISA slot 7
[ +0.001043] platform eisa.0: Cannot allocate resource for EISA slot 8
[ +0.448971] block sda: the capability attribute has been deprecated.
[ +0.076726] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.021403] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +6.907524] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [a122dc94a3fdc725c11720dd2aa12be971bdefa53aa2ab2d310cfef11b0accd5] <==
{"level":"warn","ts":"2025-09-26T22:34:24.310147Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34806","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.317113Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34816","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.325624Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34854","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.331506Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34860","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.338728Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34886","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.345090Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34894","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.350780Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34904","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.357241Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34932","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.362861Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34934","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.369010Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34962","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.376904Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34980","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.383174Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34982","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.389681Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:34992","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.395736Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35020","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.401627Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35026","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.411879Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35036","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.418865Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35042","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.425357Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35050","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.431514Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35060","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.437293Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35074","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.443245Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35084","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.448870Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35102","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.463389Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35124","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.470086Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35140","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-26T22:34:24.477539Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:35148","server-name":"","error":"EOF"}
==> kernel <==
22:34:44 up 16 min, 0 users, load average: 1.37, 1.14, 0.60
Linux dockerenv-288409 6.8.0-1040-gcp #42~22.04.1-Ubuntu SMP Tue Sep 9 13:30:57 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [b8e256beac4272358658f856de9d4ee060aa68822cd69e36db42c39cae7f8143] <==
I0926 22:34:33.995787 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0926 22:34:33.996024 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0926 22:34:33.996120 1 main.go:148] setting mtu 1500 for CNI
I0926 22:34:33.996134 1 main.go:178] kindnetd IP family: "ipv4"
I0926 22:34:33.996152 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-09-26T22:34:34Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I0926 22:34:34.197317 1 controller.go:377] "Starting controller" name="kube-network-policies"
I0926 22:34:34.197354 1 controller.go:381] "Waiting for informer caches to sync"
I0926 22:34:34.197369 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I0926 22:34:34.197578 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I0926 22:34:34.497964 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I0926 22:34:34.497982 1 metrics.go:72] Registering metrics
I0926 22:34:34.498042 1 controller.go:711] "Syncing nftables rules"
I0926 22:34:44.197862 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0926 22:34:44.197896 1 main.go:301] handling current node
==> kube-apiserver [15bac676834485c52f16bf244d0549ad3ef5ec24a71542f0a164b6e89a7a3f5b] <==
I0926 22:34:25.011414 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister"
I0926 22:34:25.011531 1 aggregator.go:171] initial CRD sync complete...
I0926 22:34:25.011552 1 autoregister_controller.go:144] Starting autoregister controller
I0926 22:34:25.011558 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0926 22:34:25.011565 1 cache.go:39] Caches are synced for autoregister controller
I0926 22:34:25.013036 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0926 22:34:25.013092 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I0926 22:34:25.031236 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I0926 22:34:25.909631 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0926 22:34:25.913163 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0926 22:34:25.913180 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0926 22:34:26.303664 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0926 22:34:26.333529 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0926 22:34:26.412387 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0926 22:34:26.417862 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0926 22:34:26.418568 1 controller.go:667] quota admission added evaluator for: endpoints
I0926 22:34:26.421981 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0926 22:34:26.934924 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0926 22:34:27.441834 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0926 22:34:27.448878 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0926 22:34:27.456464 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0926 22:34:32.837939 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0926 22:34:32.840786 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0926 22:34:32.936979 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0926 22:34:33.036379 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [6ab780ebce19ebe083d69344e2b93b4c8fbf614e07cd2e0dc5f281ee27f39465] <==
I0926 22:34:31.896673 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="dockerenv-288409" podCIDRs=["10.244.0.0/24"]
I0926 22:34:31.933357 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I0926 22:34:31.933500 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I0926 22:34:31.934568 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I0926 22:34:31.934590 1 shared_informer.go:356] "Caches are synced" controller="job"
I0926 22:34:31.934594 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0926 22:34:31.934620 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0926 22:34:31.934699 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I0926 22:34:31.934724 1 shared_informer.go:356] "Caches are synced" controller="bootstrap_signer"
I0926 22:34:31.934729 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I0926 22:34:31.934710 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0926 22:34:31.934717 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0926 22:34:31.934819 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I0926 22:34:31.934803 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0926 22:34:31.934783 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0926 22:34:31.934881 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I0926 22:34:31.934973 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I0926 22:34:31.938196 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I0926 22:34:31.938327 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I0926 22:34:31.940573 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0926 22:34:31.941757 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0926 22:34:31.942902 1 shared_informer.go:356] "Caches are synced" controller="daemon sets"
I0926 22:34:31.949172 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I0926 22:34:31.950374 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0926 22:34:31.954529 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
==> kube-proxy [00c07918d85dc67ccd690abe73f0f796c2ba48ffb3bc18370c690feb73d26a6a] <==
I0926 22:34:33.531801 1 server_linux.go:53] "Using iptables proxy"
I0926 22:34:33.597197 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0926 22:34:33.697887 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0926 22:34:33.697916 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0926 22:34:33.698046 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0926 22:34:33.721490 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0926 22:34:33.721533 1 server_linux.go:132] "Using iptables Proxier"
I0926 22:34:33.727458 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0926 22:34:33.727925 1 server.go:527] "Version info" version="v1.34.0"
I0926 22:34:33.727960 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0926 22:34:33.729511 1 config.go:200] "Starting service config controller"
I0926 22:34:33.729522 1 config.go:106] "Starting endpoint slice config controller"
I0926 22:34:33.729533 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0926 22:34:33.729538 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0926 22:34:33.729579 1 config.go:309] "Starting node config controller"
I0926 22:34:33.729585 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0926 22:34:33.729592 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0926 22:34:33.729850 1 config.go:403] "Starting serviceCIDR config controller"
I0926 22:34:33.729868 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0926 22:34:33.829691 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0926 22:34:33.829691 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0926 22:34:33.830102 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [07f5581a290d4cf0a51f67b7de1bcd59271ed696ab242fc2115709f2274c98a5] <==
I0926 22:34:25.549383 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0926 22:34:25.551092 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0926 22:34:25.551121 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0926 22:34:25.551388 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
E0926 22:34:25.552775 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E0926 22:34:25.553507 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
I0926 22:34:25.554403 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E0926 22:34:25.554839 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0926 22:34:25.554865 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0926 22:34:25.555047 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0926 22:34:25.555233 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0926 22:34:25.555370 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0926 22:34:25.555467 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0926 22:34:25.555477 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0926 22:34:25.555475 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0926 22:34:25.555585 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0926 22:34:25.555634 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0926 22:34:25.555656 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0926 22:34:25.555776 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0926 22:34:25.555921 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0926 22:34:25.556070 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0926 22:34:25.556471 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0926 22:34:25.556497 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E0926 22:34:25.556482 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
I0926 22:34:26.851829 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.070493 1524 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.070529 1524 projected.go:196] Error preparing data for projected volume kube-api-access-64wn8 for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.070658 1524 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/829a3b28-be70-4704-9ff0-0f13173e9a69-kube-api-access-64wn8 podName:829a3b28-be70-4704-9ff0-0f13173e9a69 nodeName:}" failed. No retries permitted until 2025-09-26 22:34:32.570626732 +0000 UTC m=+5.393883412 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-64wn8" (UniqueName: "kubernetes.io/projected/829a3b28-be70-4704-9ff0-0f13173e9a69-kube-api-access-64wn8") pod "storage-provisioner" (UID: "829a3b28-be70-4704-9ff0-0f13173e9a69") : configmap "kube-root-ca.crt" not found
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.669723 1524 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.669767 1524 projected.go:196] Error preparing data for projected volume kube-api-access-64wn8 for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Sep 26 22:34:32 dockerenv-288409 kubelet[1524]: E0926 22:34:32.669838 1524 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/829a3b28-be70-4704-9ff0-0f13173e9a69-kube-api-access-64wn8 podName:829a3b28-be70-4704-9ff0-0f13173e9a69 nodeName:}" failed. No retries permitted until 2025-09-26 22:34:33.669820137 +0000 UTC m=+6.493076811 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-64wn8" (UniqueName: "kubernetes.io/projected/829a3b28-be70-4704-9ff0-0f13173e9a69-kube-api-access-64wn8") pod "storage-provisioner" (UID: "829a3b28-be70-4704-9ff0-0f13173e9a69") : configmap "kube-root-ca.crt" not found
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071801 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfzpq\" (UniqueName: \"kubernetes.io/projected/a1ecac65-9974-4c9c-a3fa-2a23b59e0583-kube-api-access-mfzpq\") pod \"kube-proxy-b8w46\" (UID: \"a1ecac65-9974-4c9c-a3fa-2a23b59e0583\") " pod="kube-system/kube-proxy-b8w46"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071832 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/dbd80d7c-ac73-4ace-b92a-c92e83855505-lib-modules\") pod \"kindnet-j7zqb\" (UID: \"dbd80d7c-ac73-4ace-b92a-c92e83855505\") " pod="kube-system/kindnet-j7zqb"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071861 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/a1ecac65-9974-4c9c-a3fa-2a23b59e0583-kube-proxy\") pod \"kube-proxy-b8w46\" (UID: \"a1ecac65-9974-4c9c-a3fa-2a23b59e0583\") " pod="kube-system/kube-proxy-b8w46"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071877 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a1ecac65-9974-4c9c-a3fa-2a23b59e0583-lib-modules\") pod \"kube-proxy-b8w46\" (UID: \"a1ecac65-9974-4c9c-a3fa-2a23b59e0583\") " pod="kube-system/kube-proxy-b8w46"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071932 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/dbd80d7c-ac73-4ace-b92a-c92e83855505-cni-cfg\") pod \"kindnet-j7zqb\" (UID: \"dbd80d7c-ac73-4ace-b92a-c92e83855505\") " pod="kube-system/kindnet-j7zqb"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071972 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/dbd80d7c-ac73-4ace-b92a-c92e83855505-xtables-lock\") pod \"kindnet-j7zqb\" (UID: \"dbd80d7c-ac73-4ace-b92a-c92e83855505\") " pod="kube-system/kindnet-j7zqb"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.071999 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/a1ecac65-9974-4c9c-a3fa-2a23b59e0583-xtables-lock\") pod \"kube-proxy-b8w46\" (UID: \"a1ecac65-9974-4c9c-a3fa-2a23b59e0583\") " pod="kube-system/kube-proxy-b8w46"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.072028 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x78pk\" (UniqueName: \"kubernetes.io/projected/dbd80d7c-ac73-4ace-b92a-c92e83855505-kube-api-access-x78pk\") pod \"kindnet-j7zqb\" (UID: \"dbd80d7c-ac73-4ace-b92a-c92e83855505\") " pod="kube-system/kindnet-j7zqb"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.172892 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5d4x\" (UniqueName: \"kubernetes.io/projected/1a3a32b2-531c-4c7e-80f8-1fb4c90a7113-kube-api-access-q5d4x\") pod \"coredns-66bc5c9577-ks8sh\" (UID: \"1a3a32b2-531c-4c7e-80f8-1fb4c90a7113\") " pod="kube-system/coredns-66bc5c9577-ks8sh"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: I0926 22:34:33.173051 1524 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1a3a32b2-531c-4c7e-80f8-1fb4c90a7113-config-volume\") pod \"coredns-66bc5c9577-ks8sh\" (UID: \"1a3a32b2-531c-4c7e-80f8-1fb4c90a7113\") " pod="kube-system/coredns-66bc5c9577-ks8sh"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: E0926 22:34:33.472345 1524 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\": failed to find network info for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\""
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: E0926 22:34:33.472413 1524 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\": failed to find network info for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\"" pod="kube-system/coredns-66bc5c9577-ks8sh"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: E0926 22:34:33.472446 1524 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\": failed to find network info for sandbox \"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\"" pod="kube-system/coredns-66bc5c9577-ks8sh"
Sep 26 22:34:33 dockerenv-288409 kubelet[1524]: E0926 22:34:33.472510 1524 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-ks8sh_kube-system(1a3a32b2-531c-4c7e-80f8-1fb4c90a7113)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-ks8sh_kube-system(1a3a32b2-531c-4c7e-80f8-1fb4c90a7113)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\\\": failed to find network info for sandbox \\\"8e7469dea77034071c540151bc35e8fa275f94c06d5888615f95f61f1ad72c82\\\"\"" pod="kube-system/coredns-66bc5c9577-ks8sh" podUID="1a3a32b2-531c-4c7e-80f8-1fb4c90a7113"
Sep 26 22:34:34 dockerenv-288409 kubelet[1524]: I0926 22:34:34.281112 1524 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=6.281092756 podStartE2EDuration="6.281092756s" podCreationTimestamp="2025-09-26 22:34:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-26 22:34:34.28089295 +0000 UTC m=+7.104149633" watchObservedRunningTime="2025-09-26 22:34:34.281092756 +0000 UTC m=+7.104349438"
Sep 26 22:34:34 dockerenv-288409 kubelet[1524]: I0926 22:34:34.297489 1524 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-b8w46" podStartSLOduration=1.297468821 podStartE2EDuration="1.297468821s" podCreationTimestamp="2025-09-26 22:34:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-26 22:34:34.297218125 +0000 UTC m=+7.120474810" watchObservedRunningTime="2025-09-26 22:34:34.297468821 +0000 UTC m=+7.120725503"
Sep 26 22:34:37 dockerenv-288409 kubelet[1524]: I0926 22:34:37.489925 1524 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 26 22:34:37 dockerenv-288409 kubelet[1524]: I0926 22:34:37.490798 1524 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Sep 26 22:34:37 dockerenv-288409 kubelet[1524]: I0926 22:34:37.988106 1524 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-j7zqb" podStartSLOduration=4.988084184 podStartE2EDuration="4.988084184s" podCreationTimestamp="2025-09-26 22:34:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-26 22:34:34.304902857 +0000 UTC m=+7.128159540" watchObservedRunningTime="2025-09-26 22:34:37.988084184 +0000 UTC m=+10.811340868"
==> storage-provisioner [747fbf3b16772930c7f5f29583387afea27865f28e72502aecbc606127778af9] <==
I0926 22:34:33.881137 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-288409 -n dockerenv-288409
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-288409 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-ks8sh
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-288409 describe pod coredns-66bc5c9577-ks8sh
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-288409 describe pod coredns-66bc5c9577-ks8sh: exit status 1 (57.38141ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-ks8sh" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-288409 describe pod coredns-66bc5c9577-ks8sh: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-288409" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-288409
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-288409: (2.236048884s)
--- FAIL: TestDockerEnvContainerd (36.75s)
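
The kubelet entries above show the coredns sandbox failing with "failed to find network info for sandbox", which matches the post-mortem finding that coredns-66bc5c9577-ks8sh never reached Running before cleanup. A minimal sketch for inspecting CNI and sandbox state on the node before the profile is deleted, assuming the profile name from this run; the crictl and journalctl invocations below are illustrative diagnostics, not part of the test harness:

    # open a shell in the minikube node for this profile
    out/minikube-linux-amd64 ssh -p dockerenv-288409

    # inside the node: list installed CNI configs
    # (kindnet typically writes 10-kindnet.conflist here)
    ls /etc/cni/net.d

    # list pod sandboxes and containers as containerd sees them
    sudo crictl pods
    sudo crictl ps -a

    # recent kubelet logs around the sandbox errors
    sudo journalctl -u kubelet --since "5 minutes ago" | grep -i sandbox

If the kindnet conflist is missing or appears only after the sandbox attempt, the "failed to find network info" errors are the expected transient race between kindnet startup and the first coredns sandbox creation seen in the log above.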