=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-311598 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-311598 --driver=docker --container-runtime=containerd: (21.553648178s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-311598"
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-4HgIwDccgeRR/agent.867503" SSH_AGENT_PID="867504" DOCKER_HOST=ssh://docker@127.0.0.1:33520 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-4HgIwDccgeRR/agent.867503" SSH_AGENT_PID="867504" DOCKER_HOST=ssh://docker@127.0.0.1:33520 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-4HgIwDccgeRR/agent.867503" SSH_AGENT_PID="867504" DOCKER_HOST=ssh://docker@127.0.0.1:33520 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": exit status 1 (1.249908499s)
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:245: failed to build images, error: exit status 1, output:
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-4HgIwDccgeRR/agent.867503" SSH_AGENT_PID="867504" DOCKER_HOST=ssh://docker@127.0.0.1:33520 docker image ls"
docker_test.go:255: failed to detect image 'local/minikube-dockerenv-containerd-test' in output of docker image ls
panic.go:631: *** TestDockerEnvContainerd FAILED at 2025-04-14 12:53:37.711353419 +0000 UTC m=+314.770560761
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect dockerenv-311598
helpers_test.go:235: (dbg) docker inspect dockerenv-311598:
-- stdout --
[
    {
        "Id": "c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb",
        "Created": "2025-04-14T12:53:08.352394064Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 864614,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2025-04-14T12:53:08.389790956Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:fa6441117abd3f0ec72d78de011fb44ecb7b1e274ddcf240e39454ed1f98f388",
        "ResolvConfPath": "/var/lib/docker/containers/c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb/hostname",
        "HostsPath": "/var/lib/docker/containers/c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb/hosts",
        "LogPath": "/var/lib/docker/containers/c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb/c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb-json.log",
        "Name": "/dockerenv-311598",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "dockerenv-311598:/var",
                "/lib/modules:/lib/modules:ro"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {
                    "max-size": "100m"
                }
            },
            "NetworkMode": "dockerenv-311598",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "ConsoleSize": [
                0,
                0
            ],
            "CapAdd": null,
            "CapDrop": null,
            "CgroupnsMode": "host",
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 8388608000,
            "NanoCpus": 2000000000,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": [],
            "BlkioDeviceWriteBps": [],
            "BlkioDeviceReadIOps": [],
            "BlkioDeviceWriteIOps": [],
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "MemoryReservation": 0,
            "MemorySwap": 16777216000,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": [],
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "ID": "c7ee2ab45ce92fa1c1a341b7b7cade5dc96e8f25e3c012e6c5bb548e43cf04cb",
                "LowerDir": "/var/lib/docker/overlay2/c79744a47724874374fe543a2de96e5db39c4e2bc61ab693d9d0f050ba9a0984-init/diff:/var/lib/docker/overlay2/cca4799f9312b6fe9b1cfc055f52c55059a9c5847099ceef6e87d300c55653b8/diff",
                "MergedDir": "/var/lib/docker/overlay2/c79744a47724874374fe543a2de96e5db39c4e2bc61ab693d9d0f050ba9a0984/merged",
                "UpperDir": "/var/lib/docker/overlay2/c79744a47724874374fe543a2de96e5db39c4e2bc61ab693d9d0f050ba9a0984/diff",
                "WorkDir": "/var/lib/docker/overlay2/c79744a47724874374fe543a2de96e5db39c4e2bc61ab693d9d0f050ba9a0984/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            },
            {
                "Type": "volume",
                "Name": "dockerenv-311598",
                "Source": "/var/lib/docker/volumes/dockerenv-311598/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "dockerenv-311598",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "32443/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a",
            "Volumes": null,
            "WorkingDir": "/",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "OnBuild": null,
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "dockerenv-311598",
                "name.minikube.sigs.k8s.io": "dockerenv-311598",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "023f511dabf96db5d91f9208539f667ba12ba0fb0576866f44111ae3d0d44685",
            "SandboxKey": "/var/run/docker/netns/023f511dabf9",
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33520"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33521"
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33524"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33522"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33523"
                    }
                ]
            },
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "",
            "Gateway": "",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "",
            "IPPrefixLen": 0,
            "IPv6Gateway": "",
            "MacAddress": "",
            "Networks": {
                "dockerenv-311598": {
                    "IPAMConfig": {
                        "IPv4Address": "192.168.49.2"
                    },
                    "Links": null,
                    "Aliases": null,
                    "MacAddress": "8a:ba:7f:95:8f:25",
                    "DriverOpts": null,
                    "GwPriority": 0,
                    "NetworkID": "863d13fab63c9e00e21e7f894184d3427d15b6c2130afd816a90d5b994fb6a75",
                    "EndpointID": "741d7645616e4ad0c1753b6fe0a5726063a09073c01f72ef34c3fc53b2cbf89b",
                    "Gateway": "192.168.49.1",
                    "IPAddress": "192.168.49.2",
                    "IPPrefixLen": 24,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "DNSNames": [
                        "dockerenv-311598",
                        "c7ee2ab45ce9"
                    ]
                }
            }
        }
    }
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-311598 -n dockerenv-311598
helpers_test.go:244: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-311598 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p dockerenv-311598 logs -n 25: (1.069489264s)
helpers_test.go:252: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:51 UTC | 14 Apr 25 12:51 UTC |
| | amd-gpu-device-plugin | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:51 UTC | 14 Apr 25 12:52 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable nvidia-device-plugin | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-486370 ip | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | enable headlamp | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | -p addons-486370 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable cloud-spanner | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-486370 ssh cat | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | /opt/local-path-provisioner/pvc-d75ee2b8-4f0d-42d1-a0d3-e04c36bbef4f_default_test-pvc/file1 | | | | | |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable inspektor-gadget | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-486370 ssh curl -s | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-486370 ip | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-486370 addons disable | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-486370 addons | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| stop | -p addons-486370 | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| addons | enable dashboard -p | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | addons-486370 | | | | | |
| addons | disable dashboard -p | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | addons-486370 | | | | | |
| addons | disable gvisor -p | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:52 UTC |
| | addons-486370 | | | | | |
| delete | -p addons-486370 | addons-486370 | jenkins | v1.35.0 | 14 Apr 25 12:52 UTC | 14 Apr 25 12:53 UTC |
| start | -p dockerenv-311598 | dockerenv-311598 | jenkins | v1.35.0 | 14 Apr 25 12:53 UTC | 14 Apr 25 12:53 UTC |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| docker-env | --ssh-host --ssh-add -p | dockerenv-311598 | jenkins | v1.35.0 | 14 Apr 25 12:53 UTC | 14 Apr 25 12:53 UTC |
| | dockerenv-311598 | | | | | |
|------------|---------------------------------------------------------------------------------------------|------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2025/04/14 12:53:02
Running on machine: ubuntu-20-agent-15
Binary: Built with gc go1.24.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0414 12:53:02.718600 864076 out.go:345] Setting OutFile to fd 1 ...
I0414 12:53:02.718871 864076 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0414 12:53:02.718875 864076 out.go:358] Setting ErrFile to fd 2...
I0414 12:53:02.718878 864076 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0414 12:53:02.719038 864076 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20623-835291/.minikube/bin
I0414 12:53:02.719600 864076 out.go:352] Setting JSON to false
I0414 12:53:02.720589 864076 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":16532,"bootTime":1744618651,"procs":180,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1078-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0414 12:53:02.720646 864076 start.go:139] virtualization: kvm guest
I0414 12:53:02.723147 864076 out.go:177] * [dockerenv-311598] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
I0414 12:53:02.725116 864076 out.go:177] - MINIKUBE_LOCATION=20623
I0414 12:53:02.725122 864076 notify.go:220] Checking for updates...
I0414 12:53:02.728351 864076 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0414 12:53:02.730229 864076 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20623-835291/kubeconfig
I0414 12:53:02.732095 864076 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20623-835291/.minikube
I0414 12:53:02.733883 864076 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0414 12:53:02.735419 864076 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0414 12:53:02.737679 864076 driver.go:394] Setting default libvirt URI to qemu:///system
I0414 12:53:02.763004 864076 docker.go:123] docker version: linux-28.0.4:Docker Engine - Community
I0414 12:53:02.763105 864076 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0414 12:53:02.814969 864076 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-04-14 12:53:02.805248429 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1078-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:28.0.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:05044ec0a9a75232cad458027ca83437aae3f4da} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:v1.2.5-0-g59923ef} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.22.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.34.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0414 12:53:02.815072 864076 docker.go:318] overlay module found
I0414 12:53:02.817915 864076 out.go:177] * Using the docker driver based on user configuration
I0414 12:53:02.819371 864076 start.go:297] selected driver: docker
I0414 12:53:02.819388 864076 start.go:901] validating driver "docker" against <nil>
I0414 12:53:02.819407 864076 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0414 12:53:02.819515 864076 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0414 12:53:02.868320 864076 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-04-14 12:53:02.859431873 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1078-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:28.0.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:05044ec0a9a75232cad458027ca83437aae3f4da} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:v1.2.5-0-g59923ef} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.22.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.34.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0414 12:53:02.868542 864076 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0414 12:53:02.869328 864076 start_flags.go:393] Using suggested 8000MB memory alloc based on sys=32089MB, container=32089MB
I0414 12:53:02.869553 864076 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
I0414 12:53:02.871844 864076 out.go:177] * Using Docker driver with root privileges
I0414 12:53:02.873266 864076 cni.go:84] Creating CNI manager for ""
I0414 12:53:02.873332 864076 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0414 12:53:02.873339 864076 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0414 12:53:02.873423 864076 start.go:340] cluster config:
{Name:dockerenv-311598 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:dockerenv-311598 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0414 12:53:02.875183 864076 out.go:177] * Starting "dockerenv-311598" primary control-plane node in "dockerenv-311598" cluster
I0414 12:53:02.876636 864076 cache.go:121] Beginning downloading kic base image for docker with containerd
I0414 12:53:02.877846 864076 out.go:177] * Pulling base image v0.0.46-1744107393-20604 ...
I0414 12:53:02.878953 864076 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime containerd
I0414 12:53:02.878992 864076 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20623-835291/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-containerd-overlay2-amd64.tar.lz4
I0414 12:53:02.878998 864076 cache.go:56] Caching tarball of preloaded images
I0414 12:53:02.879061 864076 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a in local docker daemon
I0414 12:53:02.879112 864076 preload.go:172] Found /home/jenkins/minikube-integration/20623-835291/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0414 12:53:02.879120 864076 cache.go:59] Finished verifying existence of preloaded tar for v1.32.2 on containerd
I0414 12:53:02.879522 864076 profile.go:143] Saving config to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/config.json ...
I0414 12:53:02.879551 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/config.json: {Name:mk47c145542f67a864929aac9650ea7f0328dd03 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:02.899983 864076 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a in local docker daemon, skipping pull
I0414 12:53:02.899996 864076 cache.go:145] gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a exists in daemon, skipping load
I0414 12:53:02.900013 864076 cache.go:230] Successfully downloaded all kic artifacts
I0414 12:53:02.900038 864076 start.go:360] acquireMachinesLock for dockerenv-311598: {Name:mkc4e350dd264007d1aa6efe3c18c2c9d3726061 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0414 12:53:02.900143 864076 start.go:364] duration metric: took 91.124µs to acquireMachinesLock for "dockerenv-311598"
I0414 12:53:02.900164 864076 start.go:93] Provisioning new machine with config: &{Name:dockerenv-311598 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:dockerenv-311598 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0414 12:53:02.900222 864076 start.go:125] createHost starting for "" (driver="docker")
I0414 12:53:02.902391 864076 out.go:235] * Creating docker container (CPUs=2, Memory=8000MB) ...
I0414 12:53:02.902648 864076 start.go:159] libmachine.API.Create for "dockerenv-311598" (driver="docker")
I0414 12:53:02.902675 864076 client.go:168] LocalClient.Create starting
I0414 12:53:02.902744 864076 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem
I0414 12:53:02.902770 864076 main.go:141] libmachine: Decoding PEM data...
I0414 12:53:02.902780 864076 main.go:141] libmachine: Parsing certificate...
I0414 12:53:02.902827 864076 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20623-835291/.minikube/certs/cert.pem
I0414 12:53:02.902840 864076 main.go:141] libmachine: Decoding PEM data...
I0414 12:53:02.902852 864076 main.go:141] libmachine: Parsing certificate...
I0414 12:53:02.903154 864076 cli_runner.go:164] Run: docker network inspect dockerenv-311598 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0414 12:53:02.920427 864076 cli_runner.go:211] docker network inspect dockerenv-311598 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0414 12:53:02.920536 864076 network_create.go:284] running [docker network inspect dockerenv-311598] to gather additional debugging logs...
I0414 12:53:02.920555 864076 cli_runner.go:164] Run: docker network inspect dockerenv-311598
W0414 12:53:02.937851 864076 cli_runner.go:211] docker network inspect dockerenv-311598 returned with exit code 1
I0414 12:53:02.937881 864076 network_create.go:287] error running [docker network inspect dockerenv-311598]: docker network inspect dockerenv-311598: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-311598 not found
I0414 12:53:02.937893 864076 network_create.go:289] output of [docker network inspect dockerenv-311598]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-311598 not found
** /stderr **
I0414 12:53:02.937978 864076 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0414 12:53:02.955390 864076 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00003d460}
I0414 12:53:02.955434 864076 network_create.go:124] attempt to create docker network dockerenv-311598 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0414 12:53:02.955504 864076 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-311598 dockerenv-311598
I0414 12:53:03.010296 864076 network_create.go:108] docker network dockerenv-311598 192.168.49.0/24 created
I0414 12:53:03.010323 864076 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-311598" container
I0414 12:53:03.010390 864076 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0414 12:53:03.027819 864076 cli_runner.go:164] Run: docker volume create dockerenv-311598 --label name.minikube.sigs.k8s.io=dockerenv-311598 --label created_by.minikube.sigs.k8s.io=true
I0414 12:53:03.047172 864076 oci.go:103] Successfully created a docker volume dockerenv-311598
I0414 12:53:03.047246 864076 cli_runner.go:164] Run: docker run --rm --name dockerenv-311598-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-311598 --entrypoint /usr/bin/test -v dockerenv-311598:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a -d /var/lib
I0414 12:53:03.530505 864076 oci.go:107] Successfully prepared a docker volume dockerenv-311598
I0414 12:53:03.530558 864076 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime containerd
I0414 12:53:03.530592 864076 kic.go:194] Starting extracting preloaded images to volume ...
I0414 12:53:03.530652 864076 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20623-835291/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-311598:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a -I lz4 -xf /preloaded.tar -C /extractDir
I0414 12:53:08.286202 864076 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20623-835291/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-311598:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a -I lz4 -xf /preloaded.tar -C /extractDir: (4.755485302s)
I0414 12:53:08.286237 864076 kic.go:203] duration metric: took 4.755640667s to extract preloaded images to volume ...
W0414 12:53:08.286614 864076 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0414 12:53:08.286710 864076 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0414 12:53:08.336575 864076 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-311598 --name dockerenv-311598 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-311598 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-311598 --network dockerenv-311598 --ip 192.168.49.2 --volume dockerenv-311598:/var --security-opt apparmor=unconfined --memory=8000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a
I0414 12:53:08.611638 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Running}}
I0414 12:53:08.629666 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:08.648611 864076 cli_runner.go:164] Run: docker exec dockerenv-311598 stat /var/lib/dpkg/alternatives/iptables
I0414 12:53:08.690200 864076 oci.go:144] the created container "dockerenv-311598" has a running status.
I0414 12:53:08.690225 864076 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa...
I0414 12:53:09.465798 864076 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0414 12:53:09.487277 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:09.506609 864076 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0414 12:53:09.506624 864076 kic_runner.go:114] Args: [docker exec --privileged dockerenv-311598 chown docker:docker /home/docker/.ssh/authorized_keys]
I0414 12:53:09.547563 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:09.565421 864076 machine.go:93] provisionDockerMachine start ...
I0414 12:53:09.565511 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:09.583785 864076 main.go:141] libmachine: Using SSH client type: native
I0414 12:53:09.584024 864076 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x836360] 0x839060 <nil> [] 0s} 127.0.0.1 33520 <nil> <nil>}
I0414 12:53:09.584031 864076 main.go:141] libmachine: About to run SSH command:
hostname
I0414 12:53:09.705455 864076 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-311598
I0414 12:53:09.705485 864076 ubuntu.go:169] provisioning hostname "dockerenv-311598"
I0414 12:53:09.705557 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:09.724373 864076 main.go:141] libmachine: Using SSH client type: native
I0414 12:53:09.724665 864076 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x836360] 0x839060 <nil> [] 0s} 127.0.0.1 33520 <nil> <nil>}
I0414 12:53:09.724678 864076 main.go:141] libmachine: About to run SSH command:
sudo hostname dockerenv-311598 && echo "dockerenv-311598" | sudo tee /etc/hostname
I0414 12:53:09.856843 864076 main.go:141] libmachine: SSH cmd err, output: <nil>: dockerenv-311598
I0414 12:53:09.856946 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:09.876031 864076 main.go:141] libmachine: Using SSH client type: native
I0414 12:53:09.876324 864076 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x836360] 0x839060 <nil> [] 0s} 127.0.0.1 33520 <nil> <nil>}
I0414 12:53:09.876358 864076 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-311598' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-311598/g' /etc/hosts;
  else
    echo '127.0.1.1 dockerenv-311598' | sudo tee -a /etc/hosts;
  fi
fi
I0414 12:53:10.001599 864076 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0414 12:53:10.001622 864076 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20623-835291/.minikube CaCertPath:/home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20623-835291/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20623-835291/.minikube}
I0414 12:53:10.001656 864076 ubuntu.go:177] setting up certificates
I0414 12:53:10.001673 864076 provision.go:84] configureAuth start
I0414 12:53:10.001753 864076 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-311598
I0414 12:53:10.020753 864076 provision.go:143] copyHostCerts
I0414 12:53:10.020819 864076 exec_runner.go:144] found /home/jenkins/minikube-integration/20623-835291/.minikube/ca.pem, removing ...
I0414 12:53:10.020828 864076 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20623-835291/.minikube/ca.pem
I0414 12:53:10.020902 864076 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20623-835291/.minikube/ca.pem (1078 bytes)
I0414 12:53:10.021047 864076 exec_runner.go:144] found /home/jenkins/minikube-integration/20623-835291/.minikube/cert.pem, removing ...
I0414 12:53:10.021052 864076 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20623-835291/.minikube/cert.pem
I0414 12:53:10.021081 864076 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20623-835291/.minikube/cert.pem (1123 bytes)
I0414 12:53:10.021160 864076 exec_runner.go:144] found /home/jenkins/minikube-integration/20623-835291/.minikube/key.pem, removing ...
I0414 12:53:10.021164 864076 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20623-835291/.minikube/key.pem
I0414 12:53:10.021187 864076 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20623-835291/.minikube/key.pem (1675 bytes)
I0414 12:53:10.021240 864076 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20623-835291/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca-key.pem org=jenkins.dockerenv-311598 san=[127.0.0.1 192.168.49.2 dockerenv-311598 localhost minikube]
I0414 12:53:10.080188 864076 provision.go:177] copyRemoteCerts
I0414 12:53:10.080243 864076 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0414 12:53:10.080285 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:10.098849 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:10.189853 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0414 12:53:10.213261 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0414 12:53:10.237161 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0414 12:53:10.260383 864076 provision.go:87] duration metric: took 258.696565ms to configureAuth
I0414 12:53:10.260406 864076 ubuntu.go:193] setting minikube options for container-runtime
I0414 12:53:10.260626 864076 config.go:182] Loaded profile config "dockerenv-311598": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.32.2
I0414 12:53:10.260634 864076 machine.go:96] duration metric: took 695.199919ms to provisionDockerMachine
I0414 12:53:10.260640 864076 client.go:171] duration metric: took 7.357960469s to LocalClient.Create
I0414 12:53:10.260661 864076 start.go:167] duration metric: took 7.358016185s to libmachine.API.Create "dockerenv-311598"
I0414 12:53:10.260668 864076 start.go:293] postStartSetup for "dockerenv-311598" (driver="docker")
I0414 12:53:10.260676 864076 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0414 12:53:10.260715 864076 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0414 12:53:10.260759 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:10.278878 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:10.370102 864076 ssh_runner.go:195] Run: cat /etc/os-release
I0414 12:53:10.373764 864076 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0414 12:53:10.373786 864076 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0414 12:53:10.373791 864076 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0414 12:53:10.373798 864076 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0414 12:53:10.373808 864076 filesync.go:126] Scanning /home/jenkins/minikube-integration/20623-835291/.minikube/addons for local assets ...
I0414 12:53:10.373863 864076 filesync.go:126] Scanning /home/jenkins/minikube-integration/20623-835291/.minikube/files for local assets ...
I0414 12:53:10.373879 864076 start.go:296] duration metric: took 113.207025ms for postStartSetup
I0414 12:53:10.374164 864076 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-311598
I0414 12:53:10.392821 864076 profile.go:143] Saving config to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/config.json ...
I0414 12:53:10.393124 864076 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0414 12:53:10.393160 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:10.410990 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:10.494094 864076 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0414 12:53:10.499087 864076 start.go:128] duration metric: took 7.598837285s to createHost
I0414 12:53:10.499107 864076 start.go:83] releasing machines lock for "dockerenv-311598", held for 7.598955314s
I0414 12:53:10.499170 864076 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-311598
I0414 12:53:10.517506 864076 ssh_runner.go:195] Run: cat /version.json
I0414 12:53:10.517544 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:10.517586 864076 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0414 12:53:10.517651 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-311598
I0414 12:53:10.537160 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:10.537515 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:10.697048 864076 ssh_runner.go:195] Run: systemctl --version
I0414 12:53:10.701463 864076 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0414 12:53:10.705958 864076 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0414 12:53:10.730181 864076 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0414 12:53:10.730244 864076 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0414 12:53:10.759487 864076 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0414 12:53:10.759503 864076 start.go:495] detecting cgroup driver to use...
I0414 12:53:10.759537 864076 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0414 12:53:10.759588 864076 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0414 12:53:10.771481 864076 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0414 12:53:10.783140 864076 docker.go:217] disabling cri-docker service (if available) ...
I0414 12:53:10.783191 864076 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0414 12:53:10.796467 864076 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0414 12:53:10.810621 864076 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0414 12:53:10.889212 864076 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0414 12:53:10.976300 864076 docker.go:233] disabling docker service ...
I0414 12:53:10.976350 864076 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0414 12:53:10.996895 864076 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0414 12:53:11.008704 864076 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0414 12:53:11.089409 864076 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0414 12:53:11.169265 864076 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0414 12:53:11.180760 864076 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0414 12:53:11.197049 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0414 12:53:11.207230 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0414 12:53:11.217354 864076 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0414 12:53:11.217413 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0414 12:53:11.228373 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0414 12:53:11.239348 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0414 12:53:11.249854 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0414 12:53:11.260138 864076 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0414 12:53:11.269896 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0414 12:53:11.280259 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0414 12:53:11.290894 864076 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0414 12:53:11.300880 864076 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0414 12:53:11.309685 864076 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0414 12:53:11.318933 864076 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0414 12:53:11.400939 864076 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0414 12:53:11.504623 864076 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0414 12:53:11.504682 864076 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0414 12:53:11.508449 864076 start.go:563] Will wait 60s for crictl version
I0414 12:53:11.508500 864076 ssh_runner.go:195] Run: which crictl
I0414 12:53:11.511882 864076 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0414 12:53:11.551649 864076 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0414 12:53:11.551718 864076 ssh_runner.go:195] Run: containerd --version
I0414 12:53:11.577339 864076 ssh_runner.go:195] Run: containerd --version
I0414 12:53:11.604159 864076 out.go:177] * Preparing Kubernetes v1.32.2 on containerd 1.7.27 ...
I0414 12:53:11.605857 864076 cli_runner.go:164] Run: docker network inspect dockerenv-311598 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0414 12:53:11.624894 864076 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0414 12:53:11.628905 864076 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0414 12:53:11.639799 864076 kubeadm.go:883] updating cluster {Name:dockerenv-311598 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:dockerenv-311598 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0414 12:53:11.639899 864076 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime containerd
I0414 12:53:11.639943 864076 ssh_runner.go:195] Run: sudo crictl images --output json
I0414 12:53:11.673497 864076 containerd.go:627] all images are preloaded for containerd runtime.
I0414 12:53:11.673511 864076 containerd.go:534] Images already preloaded, skipping extraction
I0414 12:53:11.673562 864076 ssh_runner.go:195] Run: sudo crictl images --output json
I0414 12:53:11.706632 864076 containerd.go:627] all images are preloaded for containerd runtime.
I0414 12:53:11.706660 864076 cache_images.go:84] Images are preloaded, skipping loading
I0414 12:53:11.706669 864076 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.32.2 containerd true true} ...
I0414 12:53:11.706756 864076 kubeadm.go:946] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-311598 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.32.2 ClusterName:dockerenv-311598 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0414 12:53:11.706805 864076 ssh_runner.go:195] Run: sudo crictl info
I0414 12:53:11.740743 864076 cni.go:84] Creating CNI manager for ""
I0414 12:53:11.740755 864076 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0414 12:53:11.740777 864076 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0414 12:53:11.740797 864076 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.32.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-311598 NodeName:dockerenv-311598 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0414 12:53:11.740930 864076 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-311598"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.2
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
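The config rendered above stacks four YAML documents: InitConfiguration (node-local bootstrap: advertise address, CRI socket, kubelet extra args), ClusterConfiguration (certSANs, component extraArgs, etcd layout, networking), KubeletConfiguration, and KubeProxyConfiguration. A hedged sketch of pre-checking such a file with the kubeadm binary this run stages (assumes the config validate subcommand is available in this kubeadm release; file path as written a few lines below):

  # validate the stacked kubeadm config before running init
  sudo /var/lib/minikube/binaries/v1.32.2/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml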
I0414 12:53:11.740992 864076 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.2
I0414 12:53:11.749500 864076 binaries.go:44] Found k8s binaries, skipping transfer
I0414 12:53:11.749598 864076 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0414 12:53:11.757819 864076 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0414 12:53:11.775269 864076 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0414 12:53:11.792523 864076 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2306 bytes)
I0414 12:53:11.810285 864076 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0414 12:53:11.814209 864076 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0414 12:53:11.825672 864076 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0414 12:53:11.904474 864076 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0414 12:53:11.918219 864076 certs.go:68] Setting up /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598 for IP: 192.168.49.2
I0414 12:53:11.918237 864076 certs.go:194] generating shared ca certs ...
I0414 12:53:11.918257 864076 certs.go:226] acquiring lock for ca certs: {Name:mk8a1a66cbed08159d33bb37560b0cd6f81b677b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:11.918407 864076 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20623-835291/.minikube/ca.key
I0414 12:53:11.918446 864076 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20623-835291/.minikube/proxy-client-ca.key
I0414 12:53:11.918452 864076 certs.go:256] generating profile certs ...
I0414 12:53:11.918504 864076 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.key
I0414 12:53:11.918522 864076 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.crt with IP's: []
I0414 12:53:12.121856 864076 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.crt ...
I0414 12:53:12.121878 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.crt: {Name:mke110680a8fdccbfcc4b6571fe0fad76ef6e139 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.122068 864076 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.key ...
I0414 12:53:12.122076 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/client.key: {Name:mkfee5512dd77bbcf487ae092d87404da909a13d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.122165 864076 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key.15e0f10a
I0414 12:53:12.122176 864076 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt.15e0f10a with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0414 12:53:12.416400 864076 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt.15e0f10a ...
I0414 12:53:12.416420 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt.15e0f10a: {Name:mk75cc005af1996fb6bdb72aefd8de71564b5b63 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.416619 864076 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key.15e0f10a ...
I0414 12:53:12.416631 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key.15e0f10a: {Name:mkb78a1f65eabc761711dbe28592125a57342ff2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.416700 864076 certs.go:381] copying /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt.15e0f10a -> /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt
I0414 12:53:12.416785 864076 certs.go:385] copying /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key.15e0f10a -> /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key
I0414 12:53:12.416837 864076 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.key
I0414 12:53:12.416849 864076 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.crt with IP's: []
I0414 12:53:12.663343 864076 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.crt ...
I0414 12:53:12.663364 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.crt: {Name:mke4d8e3f57107751359ea5828d5f611fe7a5eaa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.663542 864076 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.key ...
I0414 12:53:12.663550 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.key: {Name:mkb4bade992a0b354161660aa67f18272c17fff5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:12.663716 864076 certs.go:484] found cert: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca-key.pem (1675 bytes)
I0414 12:53:12.663745 864076 certs.go:484] found cert: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/ca.pem (1078 bytes)
I0414 12:53:12.663768 864076 certs.go:484] found cert: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/cert.pem (1123 bytes)
I0414 12:53:12.663784 864076 certs.go:484] found cert: /home/jenkins/minikube-integration/20623-835291/.minikube/certs/key.pem (1675 bytes)
I0414 12:53:12.664413 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0414 12:53:12.692125 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0414 12:53:12.719044 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0414 12:53:12.743719 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0414 12:53:12.768067 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0414 12:53:12.793805 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0414 12:53:12.818003 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0414 12:53:12.841698 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/profiles/dockerenv-311598/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0414 12:53:12.865715 864076 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20623-835291/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0414 12:53:12.890868 864076 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0414 12:53:12.908775 864076 ssh_runner.go:195] Run: openssl version
I0414 12:53:12.914153 864076 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0414 12:53:12.923398 864076 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0414 12:53:12.926993 864076 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Apr 14 12:49 /usr/share/ca-certificates/minikubeCA.pem
I0414 12:53:12.927062 864076 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0414 12:53:12.933604 864076 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
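The b5213941.0 name is not arbitrary: OpenSSL resolves CAs in /etc/ssl/certs by subject-name hash plus a .0 suffix, and the x509 -hash run above is what produces that hash. A sketch of the correspondence, using the file path from this log:

  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
  # prints b5213941, i.e. the basename of the /etc/ssl/certs/b5213941.0 symlink created above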
I0414 12:53:12.943324 864076 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0414 12:53:12.946689 864076 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0414 12:53:12.946770 864076 kubeadm.go:392] StartCluster: {Name:dockerenv-311598 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1744107393-20604@sha256:2430533582a8c08f907b2d5976c79bd2e672b4f3d4484088c99b839f3175ed6a Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:dockerenv-311598 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0414 12:53:12.946830 864076 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0414 12:53:12.946875 864076 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0414 12:53:12.981090 864076 cri.go:89] found id: ""
I0414 12:53:12.981161 864076 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0414 12:53:12.989595 864076 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0414 12:53:12.997942 864076 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0414 12:53:12.997996 864076 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0414 12:53:13.006380 864076 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0414 12:53:13.006389 864076 kubeadm.go:157] found existing configuration files:
I0414 12:53:13.006426 864076 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0414 12:53:13.014800 864076 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0414 12:53:13.014847 864076 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0414 12:53:13.023271 864076 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0414 12:53:13.031713 864076 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0414 12:53:13.031764 864076 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0414 12:53:13.039995 864076 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0414 12:53:13.048296 864076 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0414 12:53:13.048355 864076 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0414 12:53:13.056656 864076 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0414 12:53:13.065232 864076 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0414 12:53:13.065291 864076 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0414 12:53:13.073975 864076 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.32.2:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0414 12:53:13.111914 864076 kubeadm.go:310] [init] Using Kubernetes version: v1.32.2
I0414 12:53:13.112015 864076 kubeadm.go:310] [preflight] Running pre-flight checks
I0414 12:53:13.129123 864076 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0414 12:53:13.129243 864076 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1078-gcp
I0414 12:53:13.129287 864076 kubeadm.go:310] OS: Linux
I0414 12:53:13.129329 864076 kubeadm.go:310] CGROUPS_CPU: enabled
I0414 12:53:13.129376 864076 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0414 12:53:13.129434 864076 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0414 12:53:13.129522 864076 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0414 12:53:13.129581 864076 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0414 12:53:13.129659 864076 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0414 12:53:13.129735 864076 kubeadm.go:310] CGROUPS_PIDS: enabled
I0414 12:53:13.129802 864076 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0414 12:53:13.129857 864076 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0414 12:53:13.186901 864076 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0414 12:53:13.187031 864076 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0414 12:53:13.187175 864076 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0414 12:53:13.193960 864076 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0414 12:53:13.196422 864076 out.go:235] - Generating certificates and keys ...
I0414 12:53:13.196542 864076 kubeadm.go:310] [certs] Using existing ca certificate authority
I0414 12:53:13.196590 864076 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0414 12:53:13.460615 864076 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0414 12:53:13.552278 864076 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0414 12:53:13.634753 864076 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0414 12:53:13.992866 864076 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0414 12:53:14.472649 864076 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0414 12:53:14.472818 864076 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [dockerenv-311598 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0414 12:53:14.675509 864076 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0414 12:53:14.675684 864076 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-311598 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0414 12:53:14.866863 864076 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0414 12:53:14.993126 864076 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0414 12:53:15.148617 864076 kubeadm.go:310] [certs] Generating "sa" key and public key
I0414 12:53:15.148709 864076 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0414 12:53:15.395775 864076 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0414 12:53:15.509482 864076 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0414 12:53:15.767581 864076 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0414 12:53:15.859136 864076 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0414 12:53:16.067877 864076 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0414 12:53:16.068455 864076 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0414 12:53:16.072671 864076 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0414 12:53:16.076801 864076 out.go:235] - Booting up control plane ...
I0414 12:53:16.076979 864076 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0414 12:53:16.077069 864076 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0414 12:53:16.077172 864076 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0414 12:53:16.086302 864076 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0414 12:53:16.092331 864076 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0414 12:53:16.092455 864076 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0414 12:53:16.182190 864076 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0414 12:53:16.182304 864076 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0414 12:53:16.684168 864076 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 502.034057ms
I0414 12:53:16.684272 864076 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0414 12:53:21.185626 864076 kubeadm.go:310] [api-check] The API server is healthy after 4.501522955s
I0414 12:53:21.197793 864076 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0414 12:53:21.210949 864076 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0414 12:53:21.234913 864076 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0414 12:53:21.235165 864076 kubeadm.go:310] [mark-control-plane] Marking the node dockerenv-311598 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0414 12:53:21.244448 864076 kubeadm.go:310] [bootstrap-token] Using token: 6paqje.rj2676tnxs6nfd1w
I0414 12:53:21.246433 864076 out.go:235] - Configuring RBAC rules ...
I0414 12:53:21.246618 864076 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0414 12:53:21.251078 864076 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0414 12:53:21.259042 864076 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0414 12:53:21.262401 864076 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0414 12:53:21.265521 864076 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0414 12:53:21.269210 864076 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0414 12:53:21.592720 864076 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0414 12:53:22.052357 864076 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0414 12:53:22.593574 864076 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0414 12:53:22.594705 864076 kubeadm.go:310]
I0414 12:53:22.594761 864076 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0414 12:53:22.594764 864076 kubeadm.go:310]
I0414 12:53:22.594886 864076 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0414 12:53:22.594902 864076 kubeadm.go:310]
I0414 12:53:22.594924 864076 kubeadm.go:310] mkdir -p $HOME/.kube
I0414 12:53:22.594974 864076 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0414 12:53:22.595012 864076 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0414 12:53:22.595014 864076 kubeadm.go:310]
I0414 12:53:22.595068 864076 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0414 12:53:22.595074 864076 kubeadm.go:310]
I0414 12:53:22.595113 864076 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0414 12:53:22.595116 864076 kubeadm.go:310]
I0414 12:53:22.595163 864076 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0414 12:53:22.595243 864076 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0414 12:53:22.595299 864076 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0414 12:53:22.595306 864076 kubeadm.go:310]
I0414 12:53:22.595383 864076 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0414 12:53:22.595442 864076 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0414 12:53:22.595445 864076 kubeadm.go:310]
I0414 12:53:22.595518 864076 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 6paqje.rj2676tnxs6nfd1w \
I0414 12:53:22.595622 864076 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:7cf9ededa45efa7945b14aad30c146ea406f42325807e7ae42d996a7a412ae18 \
I0414 12:53:22.595638 864076 kubeadm.go:310] --control-plane
I0414 12:53:22.595641 864076 kubeadm.go:310]
I0414 12:53:22.595712 864076 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0414 12:53:22.595715 864076 kubeadm.go:310]
I0414 12:53:22.595777 864076 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 6paqje.rj2676tnxs6nfd1w \
I0414 12:53:22.595858 864076 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:7cf9ededa45efa7945b14aad30c146ea406f42325807e7ae42d996a7a412ae18
I0414 12:53:22.598351 864076 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0414 12:53:22.598599 864076 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1078-gcp\n", err: exit status 1
I0414 12:53:22.598714 864076 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0414 12:53:22.598744 864076 cni.go:84] Creating CNI manager for ""
I0414 12:53:22.598752 864076 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0414 12:53:22.601953 864076 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0414 12:53:22.603699 864076 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0414 12:53:22.608701 864076 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.32.2/kubectl ...
I0414 12:53:22.608718 864076 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0414 12:53:22.628861 864076 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.2/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
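A hedged follow-up check on the CNI apply above, assuming the manifest creates the usual kindnet DaemonSet in kube-system (the kindnet pod visible later in this log suggests it does; kubectl path and kubeconfig as logged):

  # the DaemonSet should report 1 desired / 1 ready once kindnet is scheduled
  sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
    -n kube-system get daemonset kindnet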
I0414 12:53:22.839087 864076 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0414 12:53:22.839168 864076 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.2/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0414 12:53:22.839201 864076 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-311598 minikube.k8s.io/updated_at=2025_04_14T12_53_22_0700 minikube.k8s.io/version=v1.35.0 minikube.k8s.io/commit=460835bb8f21087bfa90e48a25f4afc66a903d88 minikube.k8s.io/name=dockerenv-311598 minikube.k8s.io/primary=true
I0414 12:53:22.847026 864076 ops.go:34] apiserver oom_adj: -16
I0414 12:53:22.953043 864076 kubeadm.go:1113] duration metric: took 113.926729ms to wait for elevateKubeSystemPrivileges
I0414 12:53:22.953093 864076 kubeadm.go:394] duration metric: took 10.006358102s to StartCluster
I0414 12:53:22.953124 864076 settings.go:142] acquiring lock: {Name:mk22a68ac5ad609762021e46545e24320e0ac077 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:22.953200 864076 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20623-835291/kubeconfig
I0414 12:53:22.954002 864076 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20623-835291/kubeconfig: {Name:mkf88aaf24d784c72bf57ce14af60a32087f514c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0414 12:53:22.954243 864076 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0414 12:53:22.954263 864076 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0414 12:53:22.954357 864076 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0414 12:53:22.954460 864076 addons.go:69] Setting storage-provisioner=true in profile "dockerenv-311598"
I0414 12:53:22.954475 864076 config.go:182] Loaded profile config "dockerenv-311598": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.32.2
I0414 12:53:22.954481 864076 addons.go:238] Setting addon storage-provisioner=true in "dockerenv-311598"
I0414 12:53:22.954504 864076 addons.go:69] Setting default-storageclass=true in profile "dockerenv-311598"
I0414 12:53:22.954519 864076 host.go:66] Checking if "dockerenv-311598" exists ...
I0414 12:53:22.954528 864076 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-311598"
I0414 12:53:22.954987 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:22.955106 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:22.955993 864076 out.go:177] * Verifying Kubernetes components...
I0414 12:53:22.957330 864076 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0414 12:53:22.980300 864076 addons.go:238] Setting addon default-storageclass=true in "dockerenv-311598"
I0414 12:53:22.980343 864076 host.go:66] Checking if "dockerenv-311598" exists ...
I0414 12:53:22.980817 864076 cli_runner.go:164] Run: docker container inspect dockerenv-311598 --format={{.State.Status}}
I0414 12:53:22.988099 864076 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0414 12:53:22.989607 864076 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0414 12:53:22.989618 864076 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0414 12:53:22.989669 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports \"22/tcp\") 0).HostPort}}'" dockerenv-311598
I0414 12:53:23.009694 864076 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0414 12:53:23.009709 864076 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0414 12:53:23.009756 864076 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports \"22/tcp\") 0).HostPort}}'" dockerenv-311598
I0414 12:53:23.015582 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:23.027739 864076 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/20623-835291/.minikube/machines/dockerenv-311598/id_rsa Username:docker}
I0414 12:53:23.179552 864076 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
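The pipeline above edits the live CoreDNS Corefile: fetch the coredns ConfigMap as YAML, splice a hosts stanza in front of the forward plugin (plus a log directive before errors) with sed, and push the result back with kubectl replace -f -. A minimal sketch of verifying the splice, names and paths as logged:

  # the injected stanza should carry the host.minikube.internal record
  sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
    -n kube-system get configmap coredns -o yaml | grep -A3 'hosts {'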
I0414 12:53:23.251180 864076 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0414 12:53:23.362639 864076 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0414 12:53:23.371861 864076 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0414 12:53:23.658324 864076 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0414 12:53:23.659126 864076 api_server.go:52] waiting for apiserver process to appear ...
I0414 12:53:23.659163 864076 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0414 12:53:23.851326 864076 api_server.go:72] duration metric: took 897.052522ms to wait for apiserver process to appear ...
I0414 12:53:23.851341 864076 api_server.go:88] waiting for apiserver healthz status ...
I0414 12:53:23.851359 864076 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0414 12:53:23.858310 864076 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0414 12:53:23.859270 864076 api_server.go:141] control plane version: v1.32.2
I0414 12:53:23.859288 864076 api_server.go:131] duration metric: took 7.940121ms to wait for apiserver health ...
I0414 12:53:23.859296 864076 system_pods.go:43] waiting for kube-system pods to appear ...
I0414 12:53:23.860215 864076 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0414 12:53:23.861681 864076 addons.go:514] duration metric: took 907.342947ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0414 12:53:23.862284 864076 system_pods.go:59] 5 kube-system pods found
I0414 12:53:23.862308 864076 system_pods.go:61] "etcd-dockerenv-311598" [b2807d55-0faf-4377-8bec-fca043c4b1b1] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0414 12:53:23.862317 864076 system_pods.go:61] "kube-apiserver-dockerenv-311598" [9b1dbd6d-7fdc-41c0-ae8d-a5ba2ab6f77b] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0414 12:53:23.862327 864076 system_pods.go:61] "kube-controller-manager-dockerenv-311598" [5c756f90-87eb-4bd6-8e6b-9f6f92f23549] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0414 12:53:23.862333 864076 system_pods.go:61] "kube-scheduler-dockerenv-311598" [96fee8b0-091a-4714-a667-2ef887c7e024] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0414 12:53:23.862336 864076 system_pods.go:61] "storage-provisioner" [69e53839-831d-48d4-830d-ec430953f48e] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0414 12:53:23.862343 864076 system_pods.go:74] duration metric: took 3.040872ms to wait for pod list to return data ...
I0414 12:53:23.862356 864076 kubeadm.go:582] duration metric: took 908.086447ms to wait for: map[apiserver:true system_pods:true]
I0414 12:53:23.862369 864076 node_conditions.go:102] verifying NodePressure condition ...
I0414 12:53:23.864678 864076 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0414 12:53:23.864691 864076 node_conditions.go:123] node cpu capacity is 8
I0414 12:53:23.864701 864076 node_conditions.go:105] duration metric: took 2.328965ms to run NodePressure ...
I0414 12:53:23.864712 864076 start.go:241] waiting for startup goroutines ...
I0414 12:53:24.163156 864076 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-311598" context rescaled to 1 replicas
I0414 12:53:24.163187 864076 start.go:246] waiting for cluster config update ...
I0414 12:53:24.163200 864076 start.go:255] writing updated cluster config ...
I0414 12:53:24.163540 864076 ssh_runner.go:195] Run: rm -f paused
I0414 12:53:24.213944 864076 start.go:600] kubectl: 1.32.3, cluster: 1.32.2 (minor skew: 0)
I0414 12:53:24.215909 864076 out.go:177] * Done! kubectl is now configured to use "dockerenv-311598" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
7ab87e4b1580d df3849d954c98 8 seconds ago Running kindnet-cni 0 89a402aea613c kindnet-nk6m8
4639e2832aef5 f1332858868e1 11 seconds ago Running kube-proxy 0 646124b89a3e1 kube-proxy-v8p9b
8ea9d4ffc3bc5 6e38f40d628db 11 seconds ago Running storage-provisioner 0 26052ac3364ff storage-provisioner
bdb6d181beeb6 b6a454c5a800d 21 seconds ago Running kube-controller-manager 0 8009358f7d23a kube-controller-manager-dockerenv-311598
304fdc593159d d8e673e7c9983 21 seconds ago Running kube-scheduler 0 c48a39614f673 kube-scheduler-dockerenv-311598
2860ded811b1e a9e7e6b294baf 21 seconds ago Running etcd 0 7bc7b1e1c8515 etcd-dockerenv-311598
926973aa9ef46 85b7a174738ba 21 seconds ago Running kube-apiserver 0 215cec781b4ed kube-apiserver-dockerenv-311598
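The table above is CRI state; a hedged one-liner reproducing it from inside the node container (container name from this log; crictl is the same binary the start-up steps invoked):

  docker exec dockerenv-311598 sudo crictl ps -a   # CONTAINER/IMAGE/STATE/NAME columns as above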
==> containerd <==
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.386630785Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-v8p9b,Uid:c48574bc-09f0-4263-b5de-f1dc6539ee8f,Namespace:kube-system,Attempt:0,}"
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.450482681Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-v8p9b,Uid:c48574bc-09f0-4263-b5de-f1dc6539ee8f,Namespace:kube-system,Attempt:0,} returns sandbox id \"646124b89a3e181c46e259f9be27969338e3ae77ef53192eec4a0fa5bfc80c6d\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.454050130Z" level=info msg="CreateContainer within sandbox \"646124b89a3e181c46e259f9be27969338e3ae77ef53192eec4a0fa5bfc80c6d\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.468155455Z" level=info msg="CreateContainer within sandbox \"646124b89a3e181c46e259f9be27969338e3ae77ef53192eec4a0fa5bfc80c6d\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"4639e2832aef528534ed3ab88db2ff4da35c73e948eaa5af292fda6f49d84e02\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.468839750Z" level=info msg="StartContainer for \"4639e2832aef528534ed3ab88db2ff4da35c73e948eaa5af292fda6f49d84e02\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.526323981Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-668d6bf9bc-nlq8v,Uid:f375d2c2-0f65-4e48-91d1-638b5796f237,Namespace:kube-system,Attempt:0,}"
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.529873769Z" level=info msg="StartContainer for \"4639e2832aef528534ed3ab88db2ff4da35c73e948eaa5af292fda6f49d84e02\" returns successfully"
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.552359047Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-668d6bf9bc-nlq8v,Uid:f375d2c2-0f65-4e48-91d1-638b5796f237,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\": failed to find network info for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.763061108Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-nk6m8,Uid:d9167302-1ea0-4214-8abe-c9b9b32febd1,Namespace:kube-system,Attempt:0,} returns sandbox id \"89a402aea613cdb60649888b02a61902b0a959f1326249b6528e476e5734565d\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.764785340Z" level=info msg="PullImage \"docker.io/kindest/kindnetd:v20250214-acbabc1a\""
Apr 14 12:53:27 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:27.766476857Z" level=error msg="failed to decode hosts.toml" error="invalid `host` tree"
Apr 14 12:53:28 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:28.095273306Z" level=error msg="failed to decode hosts.toml" error="invalid `host` tree"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.639496937Z" level=info msg="ImageCreate event name:\"docker.io/kindest/kindnetd:v20250214-acbabc1a\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.640384790Z" level=info msg="stop pulling image docker.io/kindest/kindnetd:v20250214-acbabc1a: active requests=0, bytes read=27521633"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.641827455Z" level=info msg="ImageCreate event name:\"sha256:df3849d954c98a7162c7bee7313ece357606e313d98ebd68b7aac5e961b1156f\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.644713331Z" level=info msg="ImageCreate event name:\"docker.io/kindest/kindnetd@sha256:e3c42406b0806c1f7e8a66838377936cbd2cdfd94d9b26a3eefedada8713d495\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.645292974Z" level=info msg="Pulled image \"docker.io/kindest/kindnetd:v20250214-acbabc1a\" with image id \"sha256:df3849d954c98a7162c7bee7313ece357606e313d98ebd68b7aac5e961b1156f\", repo tag \"docker.io/kindest/kindnetd:v20250214-acbabc1a\", repo digest \"docker.io/kindest/kindnetd@sha256:e3c42406b0806c1f7e8a66838377936cbd2cdfd94d9b26a3eefedada8713d495\", size \"38996835\" in 1.880472527s"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.645338527Z" level=info msg="PullImage \"docker.io/kindest/kindnetd:v20250214-acbabc1a\" returns image reference \"sha256:df3849d954c98a7162c7bee7313ece357606e313d98ebd68b7aac5e961b1156f\""
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.647597146Z" level=info msg="CreateContainer within sandbox \"89a402aea613cdb60649888b02a61902b0a959f1326249b6528e476e5734565d\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.661898096Z" level=info msg="CreateContainer within sandbox \"89a402aea613cdb60649888b02a61902b0a959f1326249b6528e476e5734565d\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"7ab87e4b1580d6f15cafd856118ac9fdf71639cdd903db500cfb2744e2e781b9\""
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.662668803Z" level=info msg="StartContainer for \"7ab87e4b1580d6f15cafd856118ac9fdf71639cdd903db500cfb2744e2e781b9\""
Apr 14 12:53:29 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:29.764254679Z" level=info msg="StartContainer for \"7ab87e4b1580d6f15cafd856118ac9fdf71639cdd903db500cfb2744e2e781b9\" returns successfully"
Apr 14 12:53:32 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:32.460323215Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
Apr 14 12:53:37 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:37.885022644Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-668d6bf9bc-nlq8v,Uid:f375d2c2-0f65-4e48-91d1-638b5796f237,Namespace:kube-system,Attempt:0,}"
Apr 14 12:53:37 dockerenv-311598 containerd[875]: time="2025-04-14T12:53:37.907546606Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-668d6bf9bc-nlq8v,Uid:f375d2c2-0f65-4e48-91d1-638b5796f237,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\": failed to find network info for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\""
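The two RunPodSandbox failures above ("failed to find network info for sandbox") mean containerd's CRI had no usable CNI network for the coredns sandbox yet; the earlier "No cni config template is specified, wait for other system components to drop the config" line says containerd is waiting for kindnet to write a conflist into the conf_dir configured at the start of this log. Two hedged checks that usually narrow this down (container name from this log):

  docker exec dockerenv-311598 ls /etc/cni/net.d                # a kindnet conflist should appear once kindnet runs
  docker exec dockerenv-311598 sudo crictl pods --name coredns  # sandbox state after the CNI settles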
==> describe nodes <==
Name: dockerenv-311598
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=dockerenv-311598
kubernetes.io/os=linux
minikube.k8s.io/commit=460835bb8f21087bfa90e48a25f4afc66a903d88
minikube.k8s.io/name=dockerenv-311598
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_04_14T12_53_22_0700
minikube.k8s.io/version=v1.35.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 14 Apr 2025 12:53:19 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-311598
AcquireTime: <unset>
RenewTime: Mon, 14 Apr 2025 12:53:32 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 14 Apr 2025 12:53:32 +0000 Mon, 14 Apr 2025 12:53:17 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 14 Apr 2025 12:53:32 +0000 Mon, 14 Apr 2025 12:53:17 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 14 Apr 2025 12:53:32 +0000 Mon, 14 Apr 2025 12:53:17 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 14 Apr 2025 12:53:32 +0000 Mon, 14 Apr 2025 12:53:19 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-311598
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
System Info:
Machine ID: 482e3a1082b44e13878432ccdd3f51c3
System UUID: d25ad53e-499b-4914-a9a8-b67134ec20c9
Boot ID: 2d7a6cb6-90ce-44c4-8d5a-5a7c527ec513
Kernel Version: 5.15.0-1078-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.32.2
Kube-Proxy Version: v1.32.2
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-668d6bf9bc-nlq8v 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 11s
kube-system etcd-dockerenv-311598 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 16s
kube-system kindnet-nk6m8 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 11s
kube-system kube-apiserver-dockerenv-311598 250m (3%) 0 (0%) 0 (0%) 0 (0%) 16s
kube-system kube-controller-manager-dockerenv-311598 200m (2%) 0 (0%) 0 (0%) 0 (0%) 16s
kube-system kube-proxy-v8p9b 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system kube-scheduler-dockerenv-311598 100m (1%) 0 (0%) 0 (0%) 0 (0%) 16s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 10s kube-proxy
Normal Starting 17s kubelet Starting kubelet.
Warning CgroupV1 17s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 16s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 16s kubelet Node dockerenv-311598 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 16s kubelet Node dockerenv-311598 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 16s kubelet Node dockerenv-311598 status is now: NodeHasSufficientPID
Normal RegisteredNode 12s node-controller Node dockerenv-311598 event: Registered Node dockerenv-311598 in Controller
==> dmesg <==
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000002] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.3, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +8.447585] IPv4: martian source 10.96.0.1 from 10.244.0.3, on dev virbr0
[ +0.000007] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000040] IPv4: martian source 10.96.0.1 from 10.244.0.3, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.3, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000006] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[ +0.000003] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 e3 c3 f1 08 00
[Apr14 12:44] IPv4: martian source 192.168.122.1 from 10.244.4.65, on dev virbr0
[ +0.000011] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 b1 a9 ab 08 00
[ +2.001172] IPv4: martian source 192.168.122.1 from 10.244.4.65, on dev virbr0
[ +0.000006] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 b1 a9 ab 08 00
[ +0.000008] IPv4: martian source 192.168.122.1 from 10.244.4.65, on dev virbr0
[ +0.000001] ll header: 00000000: 52 54 00 10 a2 1d 52 54 00 b1 a9 ab 08 00
==> etcd [2860ded811b1e7d7d4295ce80f94087715bf22c1f53981de236f0ebd3a9a3c4a] <==
{"level":"info","ts":"2025-04-14T12:53:17.385376Z","caller":"embed/etcd.go:729","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-04-14T12:53:17.385514Z","caller":"embed/etcd.go:600","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2025-04-14T12:53:17.385755Z","caller":"embed/etcd.go:572","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2025-04-14T12:53:17.386197Z","caller":"embed/etcd.go:280","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-04-14T12:53:17.386314Z","caller":"embed/etcd.go:871","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-04-14T12:53:17.972522Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2025-04-14T12:53:17.972585Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2025-04-14T12:53:17.972606Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2025-04-14T12:53:17.972645Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2025-04-14T12:53:17.972661Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2025-04-14T12:53:17.972679Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2025-04-14T12:53:17.972695Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2025-04-14T12:53:17.973672Z","caller":"etcdserver/server.go:2651","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-04-14T12:53:17.974327Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-04-14T12:53:17.974329Z","caller":"etcdserver/server.go:2140","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:dockerenv-311598 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2025-04-14T12:53:17.974373Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-04-14T12:53:17.974800Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2025-04-14T12:53:17.974780Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-04-14T12:53:17.974852Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-04-14T12:53:17.974906Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-04-14T12:53:17.974946Z","caller":"etcdserver/server.go:2675","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-04-14T12:53:17.975462Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-04-14T12:53:17.975547Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-04-14T12:53:17.976153Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-04-14T12:53:17.976474Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
==> kernel <==
12:53:38 up 4:36, 0 users, load average: 1.01, 1.70, 2.03
Linux dockerenv-311598 5.15.0-1078-gcp #87~20.04.1-Ubuntu SMP Mon Feb 24 10:23:16 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [7ab87e4b1580d6f15cafd856118ac9fdf71639cdd903db500cfb2744e2e781b9] <==
I0414 12:53:29.952052 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I0414 12:53:29.952398 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I0414 12:53:29.952601 1 main.go:148] setting mtu 1500 for CNI
I0414 12:53:29.952626 1 main.go:178] kindnetd IP family: "ipv4"
I0414 12:53:29.952656 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
I0414 12:53:30.350516 1 controller.go:361] Starting controller kube-network-policies
I0414 12:53:30.350567 1 controller.go:365] Waiting for informer caches to sync
I0414 12:53:30.350576 1 shared_informer.go:313] Waiting for caches to sync for kube-network-policies
I0414 12:53:30.650962 1 shared_informer.go:320] Caches are synced for kube-network-policies
I0414 12:53:30.650990 1 metrics.go:61] Registering metrics
I0414 12:53:30.651057 1 controller.go:401] Syncing nftables rules
==> kube-apiserver [926973aa9ef46f643ca94c2263c6cf40f39e12c813f9c2bc7c1d017bf2482047] <==
I0414 12:53:19.570343 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I0414 12:53:19.570392 1 shared_informer.go:320] Caches are synced for cluster_authentication_trust_controller
I0414 12:53:19.572341 1 apf_controller.go:382] Running API Priority and Fairness config worker
I0414 12:53:19.572364 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I0414 12:53:19.578129 1 shared_informer.go:320] Caches are synced for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I0414 12:53:19.578167 1 policy_source.go:240] refreshing policies
E0414 12:53:19.606451 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
E0414 12:53:19.608439 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I0414 12:53:19.656424 1 controller.go:615] quota admission added evaluator for: namespaces
I0414 12:53:19.809891 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I0414 12:53:20.462402 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0414 12:53:20.469385 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0414 12:53:20.469414 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0414 12:53:20.993883 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0414 12:53:21.035891 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0414 12:53:21.162781 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0414 12:53:21.171459 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0414 12:53:21.172558 1 controller.go:615] quota admission added evaluator for: endpoints
I0414 12:53:21.179009 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0414 12:53:21.559097 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0414 12:53:22.003795 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0414 12:53:22.051185 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0414 12:53:22.062248 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
I0414 12:53:26.811024 1 controller.go:615] quota admission added evaluator for: replicasets.apps
I0414 12:53:27.060958 1 controller.go:615] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [bdb6d181beeb61153c8403f88e066777673ada98b69e175d18a224faebdc0df2] <==
I0414 12:53:26.107296 1 shared_informer.go:320] Caches are synced for taint
I0414 12:53:26.107306 1 shared_informer.go:320] Caches are synced for TTL
I0414 12:53:26.107388 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0414 12:53:26.107522 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="dockerenv-311598"
I0414 12:53:26.107570 1 node_lifecycle_controller.go:1080] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I0414 12:53:26.108217 1 shared_informer.go:320] Caches are synced for attach detach
I0414 12:53:26.108373 1 shared_informer.go:320] Caches are synced for certificate-csrapproving
I0414 12:53:26.108220 1 shared_informer.go:320] Caches are synced for ReplicaSet
I0414 12:53:26.108230 1 shared_informer.go:320] Caches are synced for expand
I0414 12:53:26.108234 1 shared_informer.go:320] Caches are synced for GC
I0414 12:53:26.108247 1 shared_informer.go:320] Caches are synced for ephemeral
I0414 12:53:26.108783 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner
I0414 12:53:26.108790 1 shared_informer.go:320] Caches are synced for stateful set
I0414 12:53:26.112630 1 shared_informer.go:320] Caches are synced for resource quota
I0414 12:53:26.114002 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0414 12:53:26.114040 1 shared_informer.go:320] Caches are synced for daemon sets
I0414 12:53:26.117582 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="dockerenv-311598"
I0414 12:53:26.120102 1 shared_informer.go:320] Caches are synced for bootstrap_signer
I0414 12:53:26.123500 1 shared_informer.go:320] Caches are synced for garbage collector
I0414 12:53:26.915739 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="dockerenv-311598"
I0414 12:53:27.254416 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="439.439239ms"
I0414 12:53:27.262499 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="8.027053ms"
I0414 12:53:27.262618 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="72.702µs"
I0414 12:53:27.264624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="68.169µs"
I0414 12:53:32.469808 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="dockerenv-311598"
==> kube-proxy [4639e2832aef528534ed3ab88db2ff4da35c73e948eaa5af292fda6f49d84e02] <==
I0414 12:53:27.567930 1 server_linux.go:66] "Using iptables proxy"
I0414 12:53:27.693824 1 server.go:698] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0414 12:53:27.693894 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0414 12:53:27.714278 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0414 12:53:27.714348 1 server_linux.go:170] "Using iptables Proxier"
I0414 12:53:27.716469 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0414 12:53:27.716954 1 server.go:497] "Version info" version="v1.32.2"
I0414 12:53:27.716994 1 server.go:499] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0414 12:53:27.718569 1 config.go:105] "Starting endpoint slice config controller"
I0414 12:53:27.718627 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0414 12:53:27.718666 1 config.go:199] "Starting service config controller"
I0414 12:53:27.718727 1 shared_informer.go:313] Waiting for caches to sync for service config
I0414 12:53:27.718825 1 config.go:329] "Starting node config controller"
I0414 12:53:27.718943 1 shared_informer.go:313] Waiting for caches to sync for node config
I0414 12:53:27.818842 1 shared_informer.go:320] Caches are synced for service config
I0414 12:53:27.818856 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0414 12:53:27.819083 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [304fdc593159d4d72b8ff5e4a0b08017cc258931f1a18deff706bc83a167c9f2] <==
W0414 12:53:19.572263 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0414 12:53:19.572285 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0414 12:53:19.572293 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0414 12:53:19.572324 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.394440 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0414 12:53:20.394498 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.431283 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0414 12:53:20.431336 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.437176 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0414 12:53:20.437228 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.444259 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0414 12:53:20.444308 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.489652 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W0414 12:53:20.489684 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0414 12:53:20.489693 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
E0414 12:53:20.489727 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.504068 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0414 12:53:20.504125 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.511537 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0414 12:53:20.511577 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.550317 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0414 12:53:20.550367 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0414 12:53:20.647716 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "volumeattachments" in API group "storage.k8s.io" at the cluster scope
E0414 12:53:20.647773 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.VolumeAttachment: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I0414 12:53:21.068271 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Apr 14 12:53:26 dockerenv-311598 kubelet[1634]: E0414 12:53:26.295262 1634 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/69e53839-831d-48d4-830d-ec430953f48e-kube-api-access-f96fj podName:69e53839-831d-48d4-830d-ec430953f48e nodeName:}" failed. No retries permitted until 2025-04-14 12:53:26.795239974 +0000 UTC m=+5.003090562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-f96fj" (UniqueName: "kubernetes.io/projected/69e53839-831d-48d4-830d-ec430953f48e-kube-api-access-f96fj") pod "storage-provisioner" (UID: "69e53839-831d-48d4-830d-ec430953f48e") : configmap "kube-root-ca.crt" not found
Apr 14 12:53:26 dockerenv-311598 kubelet[1634]: I0414 12:53:26.895279 1634 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096322 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj6f9\" (UniqueName: \"kubernetes.io/projected/c48574bc-09f0-4263-b5de-f1dc6539ee8f-kube-api-access-fj6f9\") pod \"kube-proxy-v8p9b\" (UID: \"c48574bc-09f0-4263-b5de-f1dc6539ee8f\") " pod="kube-system/kube-proxy-v8p9b"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096400 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c48574bc-09f0-4263-b5de-f1dc6539ee8f-lib-modules\") pod \"kube-proxy-v8p9b\" (UID: \"c48574bc-09f0-4263-b5de-f1dc6539ee8f\") " pod="kube-system/kube-proxy-v8p9b"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096465 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d9167302-1ea0-4214-8abe-c9b9b32febd1-lib-modules\") pod \"kindnet-nk6m8\" (UID: \"d9167302-1ea0-4214-8abe-c9b9b32febd1\") " pod="kube-system/kindnet-nk6m8"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096489 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/c48574bc-09f0-4263-b5de-f1dc6539ee8f-kube-proxy\") pod \"kube-proxy-v8p9b\" (UID: \"c48574bc-09f0-4263-b5de-f1dc6539ee8f\") " pod="kube-system/kube-proxy-v8p9b"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096510 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/c48574bc-09f0-4263-b5de-f1dc6539ee8f-xtables-lock\") pod \"kube-proxy-v8p9b\" (UID: \"c48574bc-09f0-4263-b5de-f1dc6539ee8f\") " pod="kube-system/kube-proxy-v8p9b"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096542 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/d9167302-1ea0-4214-8abe-c9b9b32febd1-cni-cfg\") pod \"kindnet-nk6m8\" (UID: \"d9167302-1ea0-4214-8abe-c9b9b32febd1\") " pod="kube-system/kindnet-nk6m8"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096580 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d9167302-1ea0-4214-8abe-c9b9b32febd1-xtables-lock\") pod \"kindnet-nk6m8\" (UID: \"d9167302-1ea0-4214-8abe-c9b9b32febd1\") " pod="kube-system/kindnet-nk6m8"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.096600 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p9d7\" (UniqueName: \"kubernetes.io/projected/d9167302-1ea0-4214-8abe-c9b9b32febd1-kube-api-access-5p9d7\") pod \"kindnet-nk6m8\" (UID: \"d9167302-1ea0-4214-8abe-c9b9b32febd1\") " pod="kube-system/kindnet-nk6m8"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.297909 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghtqn\" (UniqueName: \"kubernetes.io/projected/f375d2c2-0f65-4e48-91d1-638b5796f237-kube-api-access-ghtqn\") pod \"coredns-668d6bf9bc-nlq8v\" (UID: \"f375d2c2-0f65-4e48-91d1-638b5796f237\") " pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.297992 1634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f375d2c2-0f65-4e48-91d1-638b5796f237-config-volume\") pod \"coredns-668d6bf9bc-nlq8v\" (UID: \"f375d2c2-0f65-4e48-91d1-638b5796f237\") " pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: E0414 12:53:27.552798 1634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\": failed to find network info for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\""
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: E0414 12:53:27.552947 1634 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\": failed to find network info for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\"" pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: E0414 12:53:27.552979 1634 kuberuntime_manager.go:1237] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\": failed to find network info for sandbox \"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\"" pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: E0414 12:53:27.553044 1634 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-668d6bf9bc-nlq8v_kube-system(f375d2c2-0f65-4e48-91d1-638b5796f237)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-668d6bf9bc-nlq8v_kube-system(f375d2c2-0f65-4e48-91d1-638b5796f237)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\\\": failed to find network info for sandbox \\\"1aadf549fba23f0d7615f11ec2fb517e4ff7992f1d447fc2a57fce833b851c17\\\"\"" pod="kube-system/coredns-668d6bf9bc-nlq8v" podUID="f375d2c2-0f65-4e48-91d1-638b5796f237"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.979816 1634 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-v8p9b" podStartSLOduration=0.979793997 podStartE2EDuration="979.793997ms" podCreationTimestamp="2025-04-14 12:53:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-04-14 12:53:27.979475819 +0000 UTC m=+6.187326416" watchObservedRunningTime="2025-04-14 12:53:27.979793997 +0000 UTC m=+6.187644593"
Apr 14 12:53:27 dockerenv-311598 kubelet[1634]: I0414 12:53:27.989512 1634 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=4.989485695 podStartE2EDuration="4.989485695s" podCreationTimestamp="2025-04-14 12:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-04-14 12:53:27.989401047 +0000 UTC m=+6.197251665" watchObservedRunningTime="2025-04-14 12:53:27.989485695 +0000 UTC m=+6.197336291"
Apr 14 12:53:30 dockerenv-311598 kubelet[1634]: I0414 12:53:30.001366 1634 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-nk6m8" podStartSLOduration=1.119216105 podStartE2EDuration="3.001344653s" podCreationTimestamp="2025-04-14 12:53:27 +0000 UTC" firstStartedPulling="2025-04-14 12:53:27.764154921 +0000 UTC m=+5.972005509" lastFinishedPulling="2025-04-14 12:53:29.646283469 +0000 UTC m=+7.854134057" observedRunningTime="2025-04-14 12:53:29.991936077 +0000 UTC m=+8.199786672" watchObservedRunningTime="2025-04-14 12:53:30.001344653 +0000 UTC m=+8.209195250"
Apr 14 12:53:32 dockerenv-311598 kubelet[1634]: I0414 12:53:32.459643 1634 kuberuntime_manager.go:1702] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Apr 14 12:53:32 dockerenv-311598 kubelet[1634]: I0414 12:53:32.460665 1634 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Apr 14 12:53:37 dockerenv-311598 kubelet[1634]: E0414 12:53:37.907840 1634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\": failed to find network info for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\""
Apr 14 12:53:37 dockerenv-311598 kubelet[1634]: E0414 12:53:37.907930 1634 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\": failed to find network info for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\"" pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:37 dockerenv-311598 kubelet[1634]: E0414 12:53:37.907963 1634 kuberuntime_manager.go:1237] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\": failed to find network info for sandbox \"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\"" pod="kube-system/coredns-668d6bf9bc-nlq8v"
Apr 14 12:53:37 dockerenv-311598 kubelet[1634]: E0414 12:53:37.908038 1634 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-668d6bf9bc-nlq8v_kube-system(f375d2c2-0f65-4e48-91d1-638b5796f237)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-668d6bf9bc-nlq8v_kube-system(f375d2c2-0f65-4e48-91d1-638b5796f237)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\\\": failed to find network info for sandbox \\\"2d0f403dcdf8013586c62cf924ce9a7e608b6a2d912c71b92371d49a72a72db5\\\"\"" pod="kube-system/coredns-668d6bf9bc-nlq8v" podUID="f375d2c2-0f65-4e48-91d1-638b5796f237"
==> storage-provisioner [8ea9d4ffc3bc5bbbd9356e01896cc005339594e7964ebede4f61095a0ff28cf2] <==
I0414 12:53:27.273112 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-311598 -n dockerenv-311598
helpers_test.go:261: (dbg) Run: kubectl --context dockerenv-311598 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: coredns-668d6bf9bc-nlq8v
helpers_test.go:274: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context dockerenv-311598 describe pod coredns-668d6bf9bc-nlq8v
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context dockerenv-311598 describe pod coredns-668d6bf9bc-nlq8v: exit status 1 (65.040181ms)
** stderr **
Error from server (NotFound): pods "coredns-668d6bf9bc-nlq8v" not found
** /stderr **
helpers_test.go:279: kubectl --context dockerenv-311598 describe pod coredns-668d6bf9bc-nlq8v: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-311598" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-311598
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-311598: (2.273541786s)
--- FAIL: TestDockerEnvContainerd (39.15s)