=== RUN TestDockerEnvContainerd
docker_test.go:170: running with containerd true linux amd64
docker_test.go:181: (dbg) Run: out/minikube-linux-amd64 start -p dockerenv-637175 --driver=docker --container-runtime=containerd
docker_test.go:181: (dbg) Done: out/minikube-linux-amd64 start -p dockerenv-637175 --driver=docker --container-runtime=containerd: (24.192691606s)
docker_test.go:189: (dbg) Run: /bin/bash -c "out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-637175"
docker_test.go:220: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXZPfAlg/agent.33078" SSH_AGENT_PID="33079" DOCKER_HOST=ssh://docker@127.0.0.1:32773 docker version"
docker_test.go:243: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXZPfAlg/agent.33078" SSH_AGENT_PID="33079" DOCKER_HOST=ssh://docker@127.0.0.1:32773 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env"
docker_test.go:243: (dbg) Non-zero exit: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXZPfAlg/agent.33078" SSH_AGENT_PID="33079" DOCKER_HOST=ssh://docker@127.0.0.1:32773 DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env": exit status 1 (2.372741646s)
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:245: failed to build images, error: exit status 1, output:
-- stdout --
Sending build context to Docker daemon 2.048kB
-- /stdout --
** stderr **
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
BuildKit is currently disabled; enable it by removing the DOCKER_BUILDKIT=0
environment-variable.
Error response from daemon: exit status 1
** /stderr **
docker_test.go:250: (dbg) Run: /bin/bash -c "SSH_AUTH_SOCK="/tmp/ssh-XXXXXXZPfAlg/agent.33078" SSH_AGENT_PID="33079" DOCKER_HOST=ssh://docker@127.0.0.1:32773 docker image ls"
docker_test.go:255: failed to detect image 'local/minikube-dockerenv-containerd-test' in output of docker image ls
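(For reference, a minimal sketch of reproducing the failing sequence by hand, assuming the same profile name and ports as captured above; eval of docker-env output is the documented usage, and the image tag is the one the test builds:)
# sketch: point the shell at the profile's docker daemon over SSH, then retry the build
eval "$(out/minikube-linux-amd64 docker-env --ssh-host --ssh-add -p dockerenv-637175)"
# same build and verification steps the test performs
DOCKER_BUILDKIT=0 docker build -t local/minikube-dockerenv-containerd-test:latest testdata/docker-env
docker image ls | grep minikube-dockerenv-containerd-test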
panic.go:615: *** TestDockerEnvContainerd FAILED at 2025-11-24 02:29:49.490959002 +0000 UTC m=+334.215312835
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestDockerEnvContainerd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestDockerEnvContainerd]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect dockerenv-637175
helpers_test.go:243: (dbg) docker inspect dockerenv-637175:
-- stdout --
[
{
"Id": "fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b",
"Created": "2025-11-24T02:29:16.142644588Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 30501,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T02:29:16.174992054Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:cfc5db6e94549413134f251b33e15399a9f8a376c7daf23bfd6c853469fc1524",
"ResolvConfPath": "/var/lib/docker/containers/fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b/hostname",
"HostsPath": "/var/lib/docker/containers/fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b/hosts",
"LogPath": "/var/lib/docker/containers/fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b/fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b-json.log",
"Name": "/dockerenv-637175",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"dockerenv-637175:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "dockerenv-637175",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 8388608000,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 16777216000,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "fa2f82c3c1d5b684b9835bb4c38f0a99ec99948c51801886e356a322dfb8d35b",
"LowerDir": "/var/lib/docker/overlay2/1faffaaf8e7125b9ace610d78bf170b0e58c07ac42cadaf307ed66bbe60b03d8-init/diff:/var/lib/docker/overlay2/2f5d717ed401f39785659385ff032a177c754c3cfdb9c7e8f0a269ab1990aca3/diff",
"MergedDir": "/var/lib/docker/overlay2/1faffaaf8e7125b9ace610d78bf170b0e58c07ac42cadaf307ed66bbe60b03d8/merged",
"UpperDir": "/var/lib/docker/overlay2/1faffaaf8e7125b9ace610d78bf170b0e58c07ac42cadaf307ed66bbe60b03d8/diff",
"WorkDir": "/var/lib/docker/overlay2/1faffaaf8e7125b9ace610d78bf170b0e58c07ac42cadaf307ed66bbe60b03d8/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "dockerenv-637175",
"Source": "/var/lib/docker/volumes/dockerenv-637175/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "dockerenv-637175",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "dockerenv-637175",
"name.minikube.sigs.k8s.io": "dockerenv-637175",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "190e06dd112287a65f040283386b5ee6bdc939725277bd42995049479a319566",
"SandboxKey": "/var/run/docker/netns/190e06dd1122",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32773"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32774"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32777"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32775"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32776"
}
]
},
"Networks": {
"dockerenv-637175": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "3833ea54a4b9fdb012bcbbde24bb330e87add2259e3b1d3f694714df6e9996a0",
"EndpointID": "148a8d49f133f06161f9f1ca094f9ad4b9688dd54b21a2adce8cdd57d28ad564",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"MacAddress": "a2:dd:7c:95:52:8c",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"dockerenv-637175",
"fa2f82c3c1d5"
]
}
}
}
}
]
-- /stdout --
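(The 22/tcp mapping above, 127.0.0.1:32773, is the host port behind the ssh:// DOCKER_HOST used by the failed build step; as a sketch, it can be read back with the same format query minikube itself runs later in this log:)
# sketch: recover the published SSH port that docker-env targets
docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' dockerenv-637175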
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p dockerenv-637175 -n dockerenv-637175
helpers_test.go:252: <<< TestDockerEnvContainerd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestDockerEnvContainerd]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p dockerenv-637175 logs -n 25
helpers_test.go:260: TestDockerEnvContainerd logs:
-- stdout --
==> Audit <==
┌────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ addons │ addons-982350 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:27 UTC │
│ addons │ addons-982350 addons disable metrics-server --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:27 UTC │
│ addons │ addons-982350 addons disable headlamp --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable yakd --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:28 UTC │
│ ip │ addons-982350 ip │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:27 UTC │
│ addons │ addons-982350 addons disable registry --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:27 UTC │ 24 Nov 25 02:27 UTC │
│ addons │ addons-982350 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ ssh │ addons-982350 ssh cat /opt/local-path-provisioner/pvc-e10810fd-af61-4198-96e5-3f409eec7e8a_default_test-pvc/file1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ ssh │ addons-982350 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ ip │ addons-982350 ip │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable ingress-dns --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable ingress --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable registry-creds --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ addons │ addons-982350 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:28 UTC │
│ stop │ -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:28 UTC │ 24 Nov 25 02:29 UTC │
│ addons │ enable dashboard -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
│ addons │ disable dashboard -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
│ addons │ disable gvisor -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
│ delete │ -p addons-982350 │ addons-982350 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
│ start │ -p dockerenv-637175 --driver=docker --container-runtime=containerd │ dockerenv-637175 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
│ docker-env │ --ssh-host --ssh-add -p dockerenv-637175 │ dockerenv-637175 │ jenkins │ v1.37.0 │ 24 Nov 25 02:29 UTC │ 24 Nov 25 02:29 UTC │
└────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/24 02:29:11
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 02:29:11.048302 29929 out.go:360] Setting OutFile to fd 1 ...
I1124 02:29:11.048384 29929 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 02:29:11.048386 29929 out.go:374] Setting ErrFile to fd 2...
I1124 02:29:11.048389 29929 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 02:29:11.048582 29929 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21975-4883/.minikube/bin
I1124 02:29:11.049062 29929 out.go:368] Setting JSON to false
I1124 02:29:11.049873 29929 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":694,"bootTime":1763950657,"procs":188,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1124 02:29:11.049931 29929 start.go:143] virtualization: kvm guest
I1124 02:29:11.052251 29929 out.go:179] * [dockerenv-637175] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1124 02:29:11.054093 29929 out.go:179] - MINIKUBE_LOCATION=21975
I1124 02:29:11.054096 29929 notify.go:221] Checking for updates...
I1124 02:29:11.055582 29929 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 02:29:11.056912 29929 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21975-4883/kubeconfig
I1124 02:29:11.058430 29929 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21975-4883/.minikube
I1124 02:29:11.059731 29929 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1124 02:29:11.060888 29929 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 02:29:11.065473 29929 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 02:29:11.089174 29929 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1124 02:29:11.089273 29929 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 02:29:11.146667 29929 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-11-24 02:29:11.136977188 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 02:29:11.146763 29929 docker.go:319] overlay module found
I1124 02:29:11.148697 29929 out.go:179] * Using the docker driver based on user configuration
I1124 02:29:11.149950 29929 start.go:309] selected driver: docker
I1124 02:29:11.149959 29929 start.go:927] validating driver "docker" against <nil>
I1124 02:29:11.149972 29929 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 02:29:11.150083 29929 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 02:29:11.206944 29929 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:45 SystemTime:2025-11-24 02:29:11.197183618 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 02:29:11.207129 29929 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 02:29:11.207650 29929 start_flags.go:410] Using suggested 8000MB memory alloc based on sys=32093MB, container=32093MB
I1124 02:29:11.207810 29929 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I1124 02:29:11.209540 29929 out.go:179] * Using Docker driver with root privileges
I1124 02:29:11.210830 29929 cni.go:84] Creating CNI manager for ""
I1124 02:29:11.210899 29929 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 02:29:11.210905 29929 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 02:29:11.210964 29929 start.go:353] cluster config:
{Name:dockerenv-637175 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:dockerenv-637175 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 02:29:11.212314 29929 out.go:179] * Starting "dockerenv-637175" primary control-plane node in "dockerenv-637175" cluster
I1124 02:29:11.213683 29929 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 02:29:11.214867 29929 out.go:179] * Pulling base image v0.0.48-1763935653-21975 ...
I1124 02:29:11.216155 29929 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 02:29:11.216181 29929 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21975-4883/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1124 02:29:11.216194 29929 cache.go:65] Caching tarball of preloaded images
I1124 02:29:11.216261 29929 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon
I1124 02:29:11.216300 29929 preload.go:238] Found /home/jenkins/minikube-integration/21975-4883/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1124 02:29:11.216310 29929 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1124 02:29:11.216745 29929 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/config.json ...
I1124 02:29:11.216772 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/config.json: {Name:mkd479a78362ede4e96c01e79feb6b8a6884d788 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:11.236563 29929 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon, skipping pull
I1124 02:29:11.236580 29929 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 exists in daemon, skipping load
I1124 02:29:11.236594 29929 cache.go:243] Successfully downloaded all kic artifacts
I1124 02:29:11.236625 29929 start.go:360] acquireMachinesLock for dockerenv-637175: {Name:mkc8f619857d76a80b3ca8364d2725187ccc550d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 02:29:11.236714 29929 start.go:364] duration metric: took 77.156µs to acquireMachinesLock for "dockerenv-637175"
I1124 02:29:11.236732 29929 start.go:93] Provisioning new machine with config: &{Name:dockerenv-637175 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:dockerenv-637175 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 02:29:11.236824 29929 start.go:125] createHost starting for "" (driver="docker")
I1124 02:29:11.238807 29929 out.go:252] * Creating docker container (CPUs=2, Memory=8000MB) ...
I1124 02:29:11.239026 29929 start.go:159] libmachine.API.Create for "dockerenv-637175" (driver="docker")
I1124 02:29:11.239057 29929 client.go:173] LocalClient.Create starting
I1124 02:29:11.239114 29929 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem
I1124 02:29:11.239151 29929 main.go:143] libmachine: Decoding PEM data...
I1124 02:29:11.239165 29929 main.go:143] libmachine: Parsing certificate...
I1124 02:29:11.239225 29929 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-4883/.minikube/certs/cert.pem
I1124 02:29:11.239239 29929 main.go:143] libmachine: Decoding PEM data...
I1124 02:29:11.239246 29929 main.go:143] libmachine: Parsing certificate...
I1124 02:29:11.239589 29929 cli_runner.go:164] Run: docker network inspect dockerenv-637175 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 02:29:11.256722 29929 cli_runner.go:211] docker network inspect dockerenv-637175 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 02:29:11.256809 29929 network_create.go:284] running [docker network inspect dockerenv-637175] to gather additional debugging logs...
I1124 02:29:11.256827 29929 cli_runner.go:164] Run: docker network inspect dockerenv-637175
W1124 02:29:11.273936 29929 cli_runner.go:211] docker network inspect dockerenv-637175 returned with exit code 1
I1124 02:29:11.273955 29929 network_create.go:287] error running [docker network inspect dockerenv-637175]: docker network inspect dockerenv-637175: exit status 1
stdout:
[]
stderr:
Error response from daemon: network dockerenv-637175 not found
I1124 02:29:11.273966 29929 network_create.go:289] output of [docker network inspect dockerenv-637175]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network dockerenv-637175 not found
** /stderr **
I1124 02:29:11.274076 29929 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 02:29:11.291771 29929 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001647e10}
I1124 02:29:11.291818 29929 network_create.go:124] attempt to create docker network dockerenv-637175 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I1124 02:29:11.291857 29929 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=dockerenv-637175 dockerenv-637175
I1124 02:29:11.339741 29929 network_create.go:108] docker network dockerenv-637175 192.168.49.0/24 created
I1124 02:29:11.339761 29929 kic.go:121] calculated static IP "192.168.49.2" for the "dockerenv-637175" container
I1124 02:29:11.339857 29929 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 02:29:11.356125 29929 cli_runner.go:164] Run: docker volume create dockerenv-637175 --label name.minikube.sigs.k8s.io=dockerenv-637175 --label created_by.minikube.sigs.k8s.io=true
I1124 02:29:11.374080 29929 oci.go:103] Successfully created a docker volume dockerenv-637175
I1124 02:29:11.374136 29929 cli_runner.go:164] Run: docker run --rm --name dockerenv-637175-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-637175 --entrypoint /usr/bin/test -v dockerenv-637175:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -d /var/lib
I1124 02:29:11.770880 29929 oci.go:107] Successfully prepared a docker volume dockerenv-637175
I1124 02:29:11.770935 29929 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 02:29:11.770942 29929 kic.go:194] Starting extracting preloaded images to volume ...
I1124 02:29:11.771006 29929 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-4883/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-637175:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir
I1124 02:29:16.063813 29929 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-4883/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v dockerenv-637175:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir: (4.29273991s)
I1124 02:29:16.063834 29929 kic.go:203] duration metric: took 4.292888923s to extract preloaded images to volume ...
W1124 02:29:16.063934 29929 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 02:29:16.063959 29929 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 02:29:16.064008 29929 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 02:29:16.125467 29929 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname dockerenv-637175 --name dockerenv-637175 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=dockerenv-637175 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=dockerenv-637175 --network dockerenv-637175 --ip 192.168.49.2 --volume dockerenv-637175:/var --security-opt apparmor=unconfined --memory=8000mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787
I1124 02:29:16.413305 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Running}}
I1124 02:29:16.431701 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:16.450345 29929 cli_runner.go:164] Run: docker exec dockerenv-637175 stat /var/lib/dpkg/alternatives/iptables
I1124 02:29:16.497818 29929 oci.go:144] the created container "dockerenv-637175" has a running status.
I1124 02:29:16.497869 29929 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa...
I1124 02:29:16.549253 29929 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 02:29:16.577516 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:16.595308 29929 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 02:29:16.595321 29929 kic_runner.go:114] Args: [docker exec --privileged dockerenv-637175 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 02:29:16.636508 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:16.658998 29929 machine.go:94] provisionDockerMachine start ...
I1124 02:29:16.659071 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:16.679848 29929 main.go:143] libmachine: Using SSH client type: native
I1124 02:29:16.680186 29929 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I1124 02:29:16.680199 29929 main.go:143] libmachine: About to run SSH command:
hostname
I1124 02:29:16.680856 29929 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:47642->127.0.0.1:32773: read: connection reset by peer
I1124 02:29:19.822112 29929 main.go:143] libmachine: SSH cmd err, output: <nil>: dockerenv-637175
I1124 02:29:19.822134 29929 ubuntu.go:182] provisioning hostname "dockerenv-637175"
I1124 02:29:19.822184 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:19.840196 29929 main.go:143] libmachine: Using SSH client type: native
I1124 02:29:19.840409 29929 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I1124 02:29:19.840417 29929 main.go:143] libmachine: About to run SSH command:
sudo hostname dockerenv-637175 && echo "dockerenv-637175" | sudo tee /etc/hostname
I1124 02:29:19.988116 29929 main.go:143] libmachine: SSH cmd err, output: <nil>: dockerenv-637175
I1124 02:29:19.988204 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.006377 29929 main.go:143] libmachine: Using SSH client type: native
I1124 02:29:20.006604 29929 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 32773 <nil> <nil>}
I1124 02:29:20.006614 29929 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdockerenv-637175' /etc/hosts; then
	if grep -xq '127.0.1.1\s.*' /etc/hosts; then
		sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 dockerenv-637175/g' /etc/hosts;
	else
		echo '127.0.1.1 dockerenv-637175' | sudo tee -a /etc/hosts;
	fi
fi
I1124 02:29:20.145387 29929 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 02:29:20.145402 29929 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21975-4883/.minikube CaCertPath:/home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21975-4883/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21975-4883/.minikube}
I1124 02:29:20.145426 29929 ubuntu.go:190] setting up certificates
I1124 02:29:20.145444 29929 provision.go:84] configureAuth start
I1124 02:29:20.145504 29929 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-637175
I1124 02:29:20.162669 29929 provision.go:143] copyHostCerts
I1124 02:29:20.162719 29929 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-4883/.minikube/ca.pem, removing ...
I1124 02:29:20.162725 29929 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-4883/.minikube/ca.pem
I1124 02:29:20.162814 29929 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21975-4883/.minikube/ca.pem (1078 bytes)
I1124 02:29:20.162897 29929 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-4883/.minikube/cert.pem, removing ...
I1124 02:29:20.162901 29929 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-4883/.minikube/cert.pem
I1124 02:29:20.162926 29929 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21975-4883/.minikube/cert.pem (1123 bytes)
I1124 02:29:20.162993 29929 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-4883/.minikube/key.pem, removing ...
I1124 02:29:20.162996 29929 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-4883/.minikube/key.pem
I1124 02:29:20.163019 29929 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21975-4883/.minikube/key.pem (1679 bytes)
I1124 02:29:20.163066 29929 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21975-4883/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca-key.pem org=jenkins.dockerenv-637175 san=[127.0.0.1 192.168.49.2 dockerenv-637175 localhost minikube]
I1124 02:29:20.202950 29929 provision.go:177] copyRemoteCerts
I1124 02:29:20.203003 29929 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 02:29:20.203032 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.220466 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:20.319043 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1124 02:29:20.337842 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I1124 02:29:20.354792 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1124 02:29:20.371737 29929 provision.go:87] duration metric: took 226.28145ms to configureAuth
I1124 02:29:20.371753 29929 ubuntu.go:206] setting minikube options for container-runtime
I1124 02:29:20.371950 29929 config.go:182] Loaded profile config "dockerenv-637175": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 02:29:20.371957 29929 machine.go:97] duration metric: took 3.712947202s to provisionDockerMachine
I1124 02:29:20.371963 29929 client.go:176] duration metric: took 9.132902079s to LocalClient.Create
I1124 02:29:20.371984 29929 start.go:167] duration metric: took 9.132958071s to libmachine.API.Create "dockerenv-637175"
I1124 02:29:20.371991 29929 start.go:293] postStartSetup for "dockerenv-637175" (driver="docker")
I1124 02:29:20.372000 29929 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 02:29:20.372050 29929 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 02:29:20.372084 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.389807 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:20.490700 29929 ssh_runner.go:195] Run: cat /etc/os-release
I1124 02:29:20.494300 29929 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 02:29:20.494313 29929 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 02:29:20.494322 29929 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-4883/.minikube/addons for local assets ...
I1124 02:29:20.494362 29929 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-4883/.minikube/files for local assets ...
I1124 02:29:20.494377 29929 start.go:296] duration metric: took 122.381914ms for postStartSetup
I1124 02:29:20.494643 29929 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-637175
I1124 02:29:20.512087 29929 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/config.json ...
I1124 02:29:20.512333 29929 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 02:29:20.512368 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.530091 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:20.626013 29929 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 02:29:20.630599 29929 start.go:128] duration metric: took 9.39375996s to createHost
I1124 02:29:20.630616 29929 start.go:83] releasing machines lock for "dockerenv-637175", held for 9.393895688s
I1124 02:29:20.630683 29929 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" dockerenv-637175
I1124 02:29:20.648089 29929 ssh_runner.go:195] Run: cat /version.json
I1124 02:29:20.648132 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.648174 29929 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 02:29:20.648233 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:20.666894 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:20.667189 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:20.821748 29929 ssh_runner.go:195] Run: systemctl --version
I1124 02:29:20.828313 29929 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 02:29:20.832771 29929 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 02:29:20.832838 29929 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 02:29:20.857958 29929 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 02:29:20.857971 29929 start.go:496] detecting cgroup driver to use...
I1124 02:29:20.858001 29929 detect.go:190] detected "systemd" cgroup driver on host os
I1124 02:29:20.858050 29929 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 02:29:20.872025 29929 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 02:29:20.884129 29929 docker.go:218] disabling cri-docker service (if available) ...
I1124 02:29:20.884169 29929 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 02:29:20.900364 29929 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 02:29:20.917600 29929 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 02:29:20.996238 29929 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 02:29:21.083135 29929 docker.go:234] disabling docker service ...
I1124 02:29:21.083187 29929 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 02:29:21.101392 29929 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 02:29:21.113819 29929 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 02:29:21.195334 29929 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 02:29:21.272504 29929 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 02:29:21.284786 29929 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 02:29:21.298520 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1124 02:29:21.308508 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 02:29:21.317093 29929 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 02:29:21.317209 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 02:29:21.325930 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 02:29:21.334630 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 02:29:21.343050 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 02:29:21.351817 29929 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 02:29:21.359606 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 02:29:21.368169 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 02:29:21.376936 29929 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 02:29:21.385504 29929 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 02:29:21.392713 29929 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 02:29:21.399882 29929 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 02:29:21.478571 29929 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 02:29:21.577095 29929 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 02:29:21.577149 29929 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 02:29:21.581025 29929 start.go:564] Will wait 60s for crictl version
I1124 02:29:21.581066 29929 ssh_runner.go:195] Run: which crictl
I1124 02:29:21.584632 29929 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 02:29:21.609475 29929 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 02:29:21.609525 29929 ssh_runner.go:195] Run: containerd --version
I1124 02:29:21.630587 29929 ssh_runner.go:195] Run: containerd --version
I1124 02:29:21.652180 29929 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1124 02:29:21.653294 29929 cli_runner.go:164] Run: docker network inspect dockerenv-637175 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 02:29:21.670184 29929 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1124 02:29:21.674231 29929 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 02:29:21.684011 29929 kubeadm.go:884] updating cluster {Name:dockerenv-637175 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:dockerenv-637175 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 02:29:21.684104 29929 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 02:29:21.684141 29929 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 02:29:21.708647 29929 containerd.go:627] all images are preloaded for containerd runtime.
I1124 02:29:21.708660 29929 containerd.go:534] Images already preloaded, skipping extraction
I1124 02:29:21.708711 29929 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 02:29:21.732938 29929 containerd.go:627] all images are preloaded for containerd runtime.
I1124 02:29:21.732963 29929 cache_images.go:86] Images are preloaded, skipping loading
I1124 02:29:21.732970 29929 kubeadm.go:935] updating node { 192.168.49.2 8443 v1.34.1 containerd true true} ...
I1124 02:29:21.733060 29929 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=dockerenv-637175 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:dockerenv-637175 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
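(The [Unit]/[Service] fragment above is what gets written to /etc/systemd/system/kubelet.service.d/10-kubeadm.conf via the 320-byte scp a few lines below. To see the merged unit the node actually runs, a sketch:
$ minikube ssh -p dockerenv-637175 -- systemctl cat kubelet)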
I1124 02:29:21.733105 29929 ssh_runner.go:195] Run: sudo crictl info
I1124 02:29:21.757308 29929 cni.go:84] Creating CNI manager for ""
I1124 02:29:21.757319 29929 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 02:29:21.757330 29929 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 02:29:21.757347 29929 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:dockerenv-637175 NodeName:dockerenv-637175 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 02:29:21.757454 29929 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "dockerenv-637175"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
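(The rendered config above is staged as /var/tmp/minikube/kubeadm.yaml.new — the 2228-byte scp below — and promoted to kubeadm.yaml just before init. To inspect exactly what kubeadm consumed, a sketch:
$ minikube ssh -p dockerenv-637175 -- sudo cat /var/tmp/minikube/kubeadm.yaml)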
I1124 02:29:21.757504 29929 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1124 02:29:21.765436 29929 binaries.go:51] Found k8s binaries, skipping transfer
I1124 02:29:21.765486 29929 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 02:29:21.773233 29929 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I1124 02:29:21.785297 29929 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 02:29:21.800662 29929 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2228 bytes)
I1124 02:29:21.813530 29929 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I1124 02:29:21.817182 29929 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 02:29:21.827087 29929 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 02:29:21.903141 29929 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 02:29:21.928038 29929 certs.go:69] Setting up /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175 for IP: 192.168.49.2
I1124 02:29:21.928050 29929 certs.go:195] generating shared ca certs ...
I1124 02:29:21.928068 29929 certs.go:227] acquiring lock for ca certs: {Name:mkd28e9f2e8e31fe23d0ba27851eb0df56d94420 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:21.928259 29929 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21975-4883/.minikube/ca.key
I1124 02:29:21.928319 29929 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21975-4883/.minikube/proxy-client-ca.key
I1124 02:29:21.928327 29929 certs.go:257] generating profile certs ...
I1124 02:29:21.928398 29929 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.key
I1124 02:29:21.928409 29929 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.crt with IP's: []
I1124 02:29:21.973316 29929 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.crt ...
I1124 02:29:21.973330 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.crt: {Name:mk4ce08c886c8bdbab90ff46ec4e8473ac82bb5e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:21.973487 29929 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.key ...
I1124 02:29:21.973493 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/client.key: {Name:mk02825949289580bb06705b85efc62dc9713456 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:21.973563 29929 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key.17c03a98
I1124 02:29:21.973574 29929 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt.17c03a98 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I1124 02:29:22.080009 29929 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt.17c03a98 ...
I1124 02:29:22.080025 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt.17c03a98: {Name:mka9e2ea488b311a8c5cb28b9eb86f2afd1668d6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:22.080193 29929 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key.17c03a98 ...
I1124 02:29:22.080208 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key.17c03a98: {Name:mk7ffb2bb34f79cddf53bd6fe7a68b7eb8015752 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:22.080274 29929 certs.go:382] copying /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt.17c03a98 -> /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt
I1124 02:29:22.080357 29929 certs.go:386] copying /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key.17c03a98 -> /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key
I1124 02:29:22.080416 29929 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.key
I1124 02:29:22.080427 29929 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.crt with IP's: []
I1124 02:29:22.177461 29929 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.crt ...
I1124 02:29:22.177489 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.crt: {Name:mk99c3970d5beee0caa84a974b42e4ac93810ce8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:22.177645 29929 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.key ...
I1124 02:29:22.177653 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.key: {Name:mkeeffd1b88d86f11badd45494ea7cbc00dde0fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:22.177822 29929 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca-key.pem (1675 bytes)
I1124 02:29:22.177855 29929 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/ca.pem (1078 bytes)
I1124 02:29:22.177878 29929 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/cert.pem (1123 bytes)
I1124 02:29:22.177899 29929 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-4883/.minikube/certs/key.pem (1679 bytes)
I1124 02:29:22.178409 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 02:29:22.196728 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 02:29:22.214352 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 02:29:22.232513 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1124 02:29:22.250092 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1124 02:29:22.267706 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 02:29:22.285237 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 02:29:22.302198 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/profiles/dockerenv-637175/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 02:29:22.319094 29929 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-4883/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 02:29:22.338353 29929 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 02:29:22.350499 29929 ssh_runner.go:195] Run: openssl version
I1124 02:29:22.356142 29929 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 02:29:22.366797 29929 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 02:29:22.370391 29929 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 02:25 /usr/share/ca-certificates/minikubeCA.pem
I1124 02:29:22.370429 29929 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 02:29:22.403721 29929 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
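(The b5213941.0 symlink name is not arbitrary: it is the OpenSSL subject hash of minikubeCA, produced by the openssl x509 -hash call just above, and OpenSSL resolves /etc/ssl/certs/<hash>.0 when building verification chains. Reproducing it by hand, a sketch:
$ openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
b5213941)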
I1124 02:29:22.412227 29929 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 02:29:22.415710 29929 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 02:29:22.415751 29929 kubeadm.go:401] StartCluster: {Name:dockerenv-637175 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:8000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:dockerenv-637175 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 02:29:22.415854 29929 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 02:29:22.415900 29929 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 02:29:22.441287 29929 cri.go:89] found id: ""
I1124 02:29:22.441348 29929 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 02:29:22.449219 29929 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 02:29:22.456759 29929 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 02:29:22.456831 29929 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 02:29:22.464372 29929 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 02:29:22.464380 29929 kubeadm.go:158] found existing configuration files:
I1124 02:29:22.464416 29929 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 02:29:22.472008 29929 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 02:29:22.472046 29929 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 02:29:22.478974 29929 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 02:29:22.486161 29929 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 02:29:22.486201 29929 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 02:29:22.493281 29929 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 02:29:22.500477 29929 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 02:29:22.500516 29929 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 02:29:22.507414 29929 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 02:29:22.514502 29929 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 02:29:22.514552 29929 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 02:29:22.521733 29929 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 02:29:22.587368 29929 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 02:29:22.645736 29929 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 02:29:34.056084 29929 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1124 02:29:34.056127 29929 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 02:29:34.056212 29929 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 02:29:34.056267 29929 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 02:29:34.056295 29929 kubeadm.go:319] OS: Linux
I1124 02:29:34.056332 29929 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 02:29:34.056387 29929 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 02:29:34.056437 29929 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 02:29:34.056473 29929 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 02:29:34.056515 29929 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 02:29:34.056552 29929 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 02:29:34.056597 29929 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 02:29:34.056634 29929 kubeadm.go:319] CGROUPS_IO: enabled
I1124 02:29:34.056697 29929 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 02:29:34.056805 29929 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 02:29:34.056910 29929 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 02:29:34.056999 29929 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 02:29:34.058651 29929 out.go:252] - Generating certificates and keys ...
I1124 02:29:34.058711 29929 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 02:29:34.058773 29929 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 02:29:34.058844 29929 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 02:29:34.058888 29929 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 02:29:34.058937 29929 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 02:29:34.058976 29929 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 02:29:34.059037 29929 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 02:29:34.059146 29929 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [dockerenv-637175 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1124 02:29:34.059210 29929 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 02:29:34.059307 29929 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [dockerenv-637175 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1124 02:29:34.059364 29929 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 02:29:34.059414 29929 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 02:29:34.059450 29929 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 02:29:34.059495 29929 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 02:29:34.059535 29929 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 02:29:34.059579 29929 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1124 02:29:34.059621 29929 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 02:29:34.059673 29929 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 02:29:34.059716 29929 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 02:29:34.059823 29929 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 02:29:34.059922 29929 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 02:29:34.061267 29929 out.go:252] - Booting up control plane ...
I1124 02:29:34.061329 29929 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 02:29:34.061393 29929 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 02:29:34.061446 29929 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 02:29:34.061554 29929 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 02:29:34.061633 29929 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1124 02:29:34.061719 29929 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1124 02:29:34.061817 29929 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 02:29:34.061851 29929 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 02:29:34.061957 29929 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1124 02:29:34.062041 29929 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1124 02:29:34.062096 29929 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.000829617s
I1124 02:29:34.062183 29929 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1124 02:29:34.062248 29929 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I1124 02:29:34.062328 29929 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1124 02:29:34.062396 29929 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1124 02:29:34.062460 29929 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.374104173s
I1124 02:29:34.062514 29929 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.52781751s
I1124 02:29:34.062573 29929 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 3.501669991s
I1124 02:29:34.062661 29929 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 02:29:34.062766 29929 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 02:29:34.062838 29929 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 02:29:34.063009 29929 kubeadm.go:319] [mark-control-plane] Marking the node dockerenv-637175 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 02:29:34.063056 29929 kubeadm.go:319] [bootstrap-token] Using token: i9brsu.8rdbirdoa0tjo3s8
I1124 02:29:34.064199 29929 out.go:252] - Configuring RBAC rules ...
I1124 02:29:34.064284 29929 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 02:29:34.064354 29929 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 02:29:34.064480 29929 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 02:29:34.064592 29929 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 02:29:34.064686 29929 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 02:29:34.064759 29929 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 02:29:34.064870 29929 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 02:29:34.064905 29929 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 02:29:34.064942 29929 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 02:29:34.064944 29929 kubeadm.go:319]
I1124 02:29:34.064998 29929 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 02:29:34.065000 29929 kubeadm.go:319]
I1124 02:29:34.065070 29929 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 02:29:34.065073 29929 kubeadm.go:319]
I1124 02:29:34.065093 29929 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 02:29:34.065153 29929 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 02:29:34.065195 29929 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 02:29:34.065198 29929 kubeadm.go:319]
I1124 02:29:34.065248 29929 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 02:29:34.065251 29929 kubeadm.go:319]
I1124 02:29:34.065293 29929 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 02:29:34.065296 29929 kubeadm.go:319]
I1124 02:29:34.065337 29929 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 02:29:34.065398 29929 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 02:29:34.065455 29929 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 02:29:34.065457 29929 kubeadm.go:319]
I1124 02:29:34.065526 29929 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 02:29:34.065587 29929 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 02:29:34.065602 29929 kubeadm.go:319]
I1124 02:29:34.065678 29929 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token i9brsu.8rdbirdoa0tjo3s8 \
I1124 02:29:34.065763 29929 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5e943442c508de754e907135e9f68708045a0a18fa82619a148153bf802a361b \
I1124 02:29:34.065802 29929 kubeadm.go:319] --control-plane
I1124 02:29:34.065806 29929 kubeadm.go:319]
I1124 02:29:34.065902 29929 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 02:29:34.065908 29929 kubeadm.go:319]
I1124 02:29:34.066029 29929 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token i9brsu.8rdbirdoa0tjo3s8 \
I1124 02:29:34.066182 29929 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5e943442c508de754e907135e9f68708045a0a18fa82619a148153bf802a361b
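(The --discovery-token-ca-cert-hash above is the SHA-256 of the cluster CA's public key. If it is ever lost, the standard kubeadm recipe recomputes it from the CA file — here /var/lib/minikube/certs/ca.crt per the CertDir above; a sketch, assuming an RSA CA key as kubeadm generates by default:
$ openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
    | openssl rsa -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | sed 's/^.* //')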
I1124 02:29:34.066206 29929 cni.go:84] Creating CNI manager for ""
I1124 02:29:34.066213 29929 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 02:29:34.067729 29929 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 02:29:34.068889 29929 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 02:29:34.073186 29929 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1124 02:29:34.073195 29929 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 02:29:34.086520 29929 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
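(kindnet is rendered to /var/tmp/minikube/cni.yaml — the 2601-byte scp above — and applied with the cluster's own kubectl. A follow-up health check sketch from the host; the app=kindnet label is an assumption based on minikube's kindnet manifest, adjust if it differs:
$ kubectl --context dockerenv-637175 -n kube-system get pods -l app=kindnet -o wide)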
I1124 02:29:34.288748 29929 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 02:29:34.288823 29929 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 02:29:34.288903 29929 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes dockerenv-637175 minikube.k8s.io/updated_at=2025_11_24T02_29_34_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864 minikube.k8s.io/name=dockerenv-637175 minikube.k8s.io/primary=true
I1124 02:29:34.298716 29929 ops.go:34] apiserver oom_adj: -16
I1124 02:29:34.359433 29929 kubeadm.go:1114] duration metric: took 70.681491ms to wait for elevateKubeSystemPrivileges
I1124 02:29:34.371863 29929 kubeadm.go:403] duration metric: took 11.956109014s to StartCluster
I1124 02:29:34.371894 29929 settings.go:142] acquiring lock: {Name:mk05d84efd831d60555ea716cd9d2a0a41871249 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:34.371962 29929 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21975-4883/kubeconfig
I1124 02:29:34.372604 29929 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-4883/kubeconfig: {Name:mkf99f016b653afd282cf36d34d1cc32c34d90de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 02:29:34.372831 29929 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 02:29:34.372835 29929 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 02:29:34.372905 29929 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 02:29:34.373003 29929 addons.go:70] Setting storage-provisioner=true in profile "dockerenv-637175"
I1124 02:29:34.373022 29929 addons.go:239] Setting addon storage-provisioner=true in "dockerenv-637175"
I1124 02:29:34.373035 29929 config.go:182] Loaded profile config "dockerenv-637175": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 02:29:34.373051 29929 host.go:66] Checking if "dockerenv-637175" exists ...
I1124 02:29:34.373051 29929 addons.go:70] Setting default-storageclass=true in profile "dockerenv-637175"
I1124 02:29:34.373068 29929 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "dockerenv-637175"
I1124 02:29:34.373460 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:34.373638 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:34.374925 29929 out.go:179] * Verifying Kubernetes components...
I1124 02:29:34.376055 29929 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 02:29:34.399526 29929 addons.go:239] Setting addon default-storageclass=true in "dockerenv-637175"
I1124 02:29:34.399558 29929 host.go:66] Checking if "dockerenv-637175" exists ...
I1124 02:29:34.400033 29929 cli_runner.go:164] Run: docker container inspect dockerenv-637175 --format={{.State.Status}}
I1124 02:29:34.401795 29929 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 02:29:34.403302 29929 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 02:29:34.403313 29929 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 02:29:34.403376 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:34.428986 29929 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 02:29:34.428998 29929 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 02:29:34.429096 29929 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" dockerenv-637175
I1124 02:29:34.430253 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:34.453106 29929 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32773 SSHKeyPath:/home/jenkins/minikube-integration/21975-4883/.minikube/machines/dockerenv-637175/id_rsa Username:docker}
I1124 02:29:34.464947 29929 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 02:29:34.509124 29929 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 02:29:34.542769 29929 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 02:29:34.560453 29929 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 02:29:34.624963 29929 start.go:977] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
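(The sed pipeline above splices a hosts{} stanza into the CoreDNS Corefile so in-cluster lookups of host.minikube.internal resolve to the gateway. To confirm the stanza landed, a sketch:
$ kubectl --context dockerenv-637175 -n kube-system get configmap coredns -o yaml)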
I1124 02:29:34.625813 29929 api_server.go:52] waiting for apiserver process to appear ...
I1124 02:29:34.625861 29929 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 02:29:34.815825 29929 api_server.go:72] duration metric: took 442.959694ms to wait for apiserver process to appear ...
I1124 02:29:34.815838 29929 api_server.go:88] waiting for apiserver healthz status ...
I1124 02:29:34.815853 29929 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I1124 02:29:34.819799 29929 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I1124 02:29:34.820575 29929 api_server.go:141] control plane version: v1.34.1
I1124 02:29:34.820590 29929 api_server.go:131] duration metric: took 4.746678ms to wait for apiserver health ...
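(The /healthz probe needs no credentials: the apiserver's system:public-info-viewer binding exposes /healthz, /livez and /readyz to unauthenticated clients, so the same check works from the host. A sketch, with -k skipping CA verification for brevity:
$ curl -sk https://192.168.49.2:8443/healthz
ok)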
I1124 02:29:34.820599 29929 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 02:29:34.822481 29929 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 02:29:34.823021 29929 system_pods.go:59] 5 kube-system pods found
I1124 02:29:34.823045 29929 system_pods.go:61] "etcd-dockerenv-637175" [387f0e1f-3454-4e7b-81b3-32f1d78ba29e] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1124 02:29:34.823052 29929 system_pods.go:61] "kube-apiserver-dockerenv-637175" [f653f5d3-db3e-4b2e-a741-93e3b5c653ea] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1124 02:29:34.823059 29929 system_pods.go:61] "kube-controller-manager-dockerenv-637175" [3049db26-6944-4f33-a3fb-8c685dcfca0e] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1124 02:29:34.823063 29929 system_pods.go:61] "kube-scheduler-dockerenv-637175" [03d5b950-a089-43fc-bf37-f7e9744a8aa4] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1124 02:29:34.823065 29929 system_pods.go:61] "storage-provisioner" [67fa6609-c28d-4748-805a-f89fcd6ca0ef] Pending
I1124 02:29:34.823071 29929 system_pods.go:74] duration metric: took 2.466808ms to wait for pod list to return data ...
I1124 02:29:34.823078 29929 kubeadm.go:587] duration metric: took 450.217798ms to wait for: map[apiserver:true system_pods:true]
I1124 02:29:34.823087 29929 node_conditions.go:102] verifying NodePressure condition ...
I1124 02:29:34.823541 29929 addons.go:530] duration metric: took 450.645876ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 02:29:34.841025 29929 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 02:29:34.841040 29929 node_conditions.go:123] node cpu capacity is 8
I1124 02:29:34.841055 29929 node_conditions.go:105] duration metric: took 17.965554ms to run NodePressure ...
I1124 02:29:34.841066 29929 start.go:242] waiting for startup goroutines ...
I1124 02:29:35.128833 29929 kapi.go:214] "coredns" deployment in "kube-system" namespace and "dockerenv-637175" context rescaled to 1 replicas
I1124 02:29:35.128859 29929 start.go:247] waiting for cluster config update ...
I1124 02:29:35.128881 29929 start.go:256] writing updated cluster config ...
I1124 02:29:35.129154 29929 ssh_runner.go:195] Run: rm -f paused
I1124 02:29:35.177416 29929 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1124 02:29:35.179602 29929 out.go:179] * Done! kubectl is now configured to use "dockerenv-637175" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
534ecf2bc35e8 409467f978b4a 11 seconds ago Running kindnet-cni 0 89029c0e62355 kindnet-bvkb6 kube-system
e03a1aea5f99b fc25172553d79 11 seconds ago Running kube-proxy 0 b7cdca9bfadab kube-proxy-l4bqr kube-system
26e1f19d9085a c80c8dbafe7dd 21 seconds ago Running kube-controller-manager 0 28c771b8d9f4f kube-controller-manager-dockerenv-637175 kube-system
69e56949f7860 7dd6aaa1717ab 21 seconds ago Running kube-scheduler 0 4cf1926d0ed49 kube-scheduler-dockerenv-637175 kube-system
dff8eeaf0eb63 c3994bc696102 21 seconds ago Running kube-apiserver 0 e5c17b1e08e9c kube-apiserver-dockerenv-637175 kube-system
7931bab2070e2 5f1f5298c888d 21 seconds ago Running etcd 0 9e408cf555e70 etcd-dockerenv-637175 kube-system
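(The IDs in this table are CRI truncations; the full container and sandbox view is available on the node, a sketch:
$ minikube ssh -p dockerenv-637175 -- sudo crictl ps -a)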
==> containerd <==
Nov 24 02:29:37 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:37.613700286Z" level=info msg="No cni config template is specified, wait for other system components to drop the config."
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.025851959Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-bvkb6,Uid:38dc2084-ce55-42e6-874c-95d25d66bdf5,Namespace:kube-system,Attempt:0,}"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.041521684Z" level=info msg="connecting to shim 89029c0e6235565b5b88f5ecea516936d6db1b1f0be75fb3ebd791aef36c4a7b" address="unix:///run/containerd/s/ca3b25ff34395d77c424399f57bc7fdc463adcd7b24d6ecc8966b0168ec95d21" namespace=k8s.io protocol=ttrpc version=3
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.043830134Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-l4bqr,Uid:e5e682f4-e767-4532-8378-ed722b8c1d26,Namespace:kube-system,Attempt:0,}"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.060044413Z" level=info msg="connecting to shim b7cdca9bfadab6bf8ee542eb6cae15f87bd1486a703a0a466dffcb7fc0c53fdc" address="unix:///run/containerd/s/90afebc53d6c9c37e6b79b6aa18a9548ead3b2dccbe51ff73a71c926651169a6" namespace=k8s.io protocol=ttrpc version=3
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.113171146Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-proxy-l4bqr,Uid:e5e682f4-e767-4532-8378-ed722b8c1d26,Namespace:kube-system,Attempt:0,} returns sandbox id \"b7cdca9bfadab6bf8ee542eb6cae15f87bd1486a703a0a466dffcb7fc0c53fdc\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.118809738Z" level=info msg="CreateContainer within sandbox \"b7cdca9bfadab6bf8ee542eb6cae15f87bd1486a703a0a466dffcb7fc0c53fdc\" for container &ContainerMetadata{Name:kube-proxy,Attempt:0,}"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.126162366Z" level=info msg="Container e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7: CDI devices from CRI Config.CDIDevices: []"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.133903618Z" level=info msg="CreateContainer within sandbox \"b7cdca9bfadab6bf8ee542eb6cae15f87bd1486a703a0a466dffcb7fc0c53fdc\" for &ContainerMetadata{Name:kube-proxy,Attempt:0,} returns container id \"e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.134588306Z" level=info msg="StartContainer for \"e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.136136426Z" level=info msg="connecting to shim e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7" address="unix:///run/containerd/s/90afebc53d6c9c37e6b79b6aa18a9548ead3b2dccbe51ff73a71c926651169a6" protocol=ttrpc version=3
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.249503626Z" level=info msg="StartContainer for \"e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7\" returns successfully"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.292523036Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kindnet-bvkb6,Uid:38dc2084-ce55-42e6-874c-95d25d66bdf5,Namespace:kube-system,Attempt:0,} returns sandbox id \"89029c0e6235565b5b88f5ecea516936d6db1b1f0be75fb3ebd791aef36c4a7b\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.297393277Z" level=info msg="CreateContainer within sandbox \"89029c0e6235565b5b88f5ecea516936d6db1b1f0be75fb3ebd791aef36c4a7b\" for container &ContainerMetadata{Name:kindnet-cni,Attempt:0,}"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.303193304Z" level=info msg="Container 534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df: CDI devices from CRI Config.CDIDevices: []"
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.309479358Z" level=info msg="CreateContainer within sandbox \"89029c0e6235565b5b88f5ecea516936d6db1b1f0be75fb3ebd791aef36c4a7b\" for &ContainerMetadata{Name:kindnet-cni,Attempt:0,} returns container id \"534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.310010942Z" level=info msg="StartContainer for \"534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df\""
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.311073086Z" level=info msg="connecting to shim 534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df" address="unix:///run/containerd/s/ca3b25ff34395d77c424399f57bc7fdc463adcd7b24d6ecc8966b0168ec95d21" protocol=ttrpc version=3
Nov 24 02:29:39 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:39.406661555Z" level=info msg="StartContainer for \"534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df\" returns successfully"
Nov 24 02:29:49 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:49.883480194Z" level=error msg="failed to reload cni configuration after receiving fs change event(WRITE \"/etc/cni/net.d/10-kindnet.conflist.temp\")" error="cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config"
Nov 24 02:29:49 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:49.883587109Z" level=error msg="failed to reload cni configuration after receiving fs change event(WRITE \"/etc/cni/net.d/10-kindnet.conflist.temp\")" error="cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config"
Nov 24 02:29:50 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:50.317826722Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:67fa6609-c28d-4748-805a-f89fcd6ca0ef,Namespace:kube-system,Attempt:0,}"
Nov 24 02:29:50 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:50.320487821Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-6gf5f,Uid:6f152ae3-917b-4ea0-a95e-941584de5e5a,Namespace:kube-system,Attempt:0,}"
Nov 24 02:29:50 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:50.339803630Z" level=info msg="connecting to shim a2c75e4a65d00642961af9581ebd48db2de51bc4c81fb5ffd7fc39b391a46969" address="unix:///run/containerd/s/13301776115cfa24eecad933e10194aa9dcd63ce554446a635ee40bdfaafc923" namespace=k8s.io protocol=ttrpc version=3
Nov 24 02:29:50 dockerenv-637175 containerd[661]: time="2025-11-24T02:29:50.363526094Z" level=info msg="connecting to shim 1eb9e3a8d9d03c0413c08aef5a2ee987701f40d55b9a9122a2f5f17d195f2b0b" address="unix:///run/containerd/s/9fb27ef30b56e84d0e3374fdb48784e97411afe5126011a8c286e9c1d55ecb29" namespace=k8s.io protocol=ttrpc version=3
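(The two "failed to reload cni configuration" errors at 02:29:49 are most likely transient: kindnet writes 10-kindnet.conflist.temp and then renames it into place, and containerd's file watcher fires on the intermediate write before the final conflist exists. The coredns and storage-provisioner sandboxes start successfully one second later, which supports that reading. To check the settled state, a sketch:
$ minikube ssh -p dockerenv-637175 -- sudo ls /etc/cni/net.d)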
==> describe nodes <==
Name: dockerenv-637175
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=dockerenv-637175
kubernetes.io/os=linux
minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864
minikube.k8s.io/name=dockerenv-637175
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T02_29_34_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 02:29:30 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: dockerenv-637175
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 02:29:43 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 24 Nov 2025 02:29:49 +0000 Mon, 24 Nov 2025 02:29:29 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 24 Nov 2025 02:29:49 +0000 Mon, 24 Nov 2025 02:29:29 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 24 Nov 2025 02:29:49 +0000 Mon, 24 Nov 2025 02:29:29 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 24 Nov 2025 02:29:49 +0000 Mon, 24 Nov 2025 02:29:49 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: dockerenv-637175
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: a6c8a789d6c7d69e45d665cd69238646
System UUID: c0c5b339-f395-45b7-84c8-83919483d743
Boot ID: 6a444014-1437-4ef5-ba54-cb22d4aebaaf
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-6gf5f 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 12s
kube-system etcd-dockerenv-637175 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 17s
kube-system kindnet-bvkb6 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 12s
kube-system kube-apiserver-dockerenv-637175 250m (3%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-controller-manager-dockerenv-637175 200m (2%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system kube-proxy-l4bqr 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12s
kube-system kube-scheduler-dockerenv-637175 100m (1%) 0 (0%) 0 (0%) 0 (0%) 17s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 16s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 11s kube-proxy
Normal Starting 17s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 17s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 17s kubelet Node dockerenv-637175 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 17s kubelet Node dockerenv-637175 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 17s kubelet Node dockerenv-637175 status is now: NodeHasSufficientPID
Normal RegisteredNode 13s node-controller Node dockerenv-637175 event: Registered Node dockerenv-637175 in Controller
Normal NodeReady 1s kubelet Node dockerenv-637175 status is now: NodeReady
==> dmesg <==
[Nov24 02:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001875] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001000] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.088013] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.411990] i8042: Warning: Keylock active
[ +0.014659] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.513869] block sda: the capability attribute has been deprecated.
[ +0.086430] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.023975] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.680840] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [7931bab2070e2f0b4acbb7709f55d09bcb54b57d17be2354e70639d67f283d57] <==
{"level":"warn","ts":"2025-11-24T02:29:30.049245Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57080","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.057472Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57110","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.064884Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57126","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.071063Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57148","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.077222Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57162","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.087208Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57188","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.095004Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57210","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.101942Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57232","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.109116Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57262","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.116877Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57282","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.123868Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57288","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.130983Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57298","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.138240Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57316","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.144923Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57326","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.151127Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57354","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.157814Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57372","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.163630Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57392","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.169644Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57394","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.175685Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57410","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.184244Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57420","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.195981Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57464","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.199352Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57472","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.206204Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57490","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.212400Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57506","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-24T02:29:30.265066Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:57530","server-name":"","error":"EOF"}
==> kernel <==
02:29:50 up 12 min, 0 user, load average: 0.74, 0.90, 0.47
Linux dockerenv-637175 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [534ecf2bc35e8b05caf19ef70ee26d652838fa463ae54cae6c6307b5f3c904df] <==
I1124 02:29:39.677716 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 02:29:39.678039 1 main.go:139] hostIP = 192.168.49.2
podIP = 192.168.49.2
I1124 02:29:39.678202 1 main.go:148] setting mtu 1500 for CNI
I1124 02:29:39.678223 1 main.go:178] kindnetd IP family: "ipv4"
I1124 02:29:39.678249 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T02:29:39Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 02:29:39.880362 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 02:29:39.880921 1 controller.go:381] "Waiting for informer caches to sync"
I1124 02:29:39.880979 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 02:29:39.881190 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 02:29:40.181285 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 02:29:40.181309 1 metrics.go:72] Registering metrics
I1124 02:29:40.181393 1 controller.go:711] "Syncing nftables rules"
I1124 02:29:49.882883 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1124 02:29:49.882957 1 main.go:301] handling current node
==> kube-apiserver [dff8eeaf0eb6393df1f17b7f7f27c2fb17cb5ea3e8ac45a2a3d310494f2ef0d2] <==
I1124 02:29:30.721674 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller"
I1124 02:29:30.721715 1 cache.go:39] Caches are synced for LocalAvailability controller
I1124 02:29:30.723310 1 controller.go:667] quota admission added evaluator for: namespaces
I1124 02:29:30.727181 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I1124 02:29:30.727308 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I1124 02:29:30.734032 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1124 02:29:30.734300 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I1124 02:29:30.914518 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I1124 02:29:31.625640 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 02:29:31.630338 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 02:29:31.630358 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 02:29:32.103466 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 02:29:32.138704 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 02:29:32.229973 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 02:29:32.235818 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I1124 02:29:32.236822 1 controller.go:667] quota admission added evaluator for: endpoints
I1124 02:29:32.240623 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 02:29:32.647169 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I1124 02:29:33.458487 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1124 02:29:33.469125 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 02:29:33.475633 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1124 02:29:38.301872 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1124 02:29:38.305681 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1124 02:29:38.648329 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1124 02:29:38.699501 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [26e1f19d9085ae4c21adeb1d05b0ff2b1a85df36d966771612465d5bd8c8f01b] <==
I1124 02:29:37.608331 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="dockerenv-637175" podCIDRs=["10.244.0.0/24"]
I1124 02:29:37.645011 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrapproving"
I1124 02:29:37.646221 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I1124 02:29:37.646281 1 shared_informer.go:356] "Caches are synced" controller="ephemeral"
I1124 02:29:37.646296 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I1124 02:29:37.646333 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I1124 02:29:37.646384 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1124 02:29:37.646431 1 shared_informer.go:356] "Caches are synced" controller="bootstrap_signer"
I1124 02:29:37.646437 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1124 02:29:37.646547 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1124 02:29:37.646624 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I1124 02:29:37.646703 1 shared_informer.go:356] "Caches are synced" controller="daemon sets"
I1124 02:29:37.646706 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="dockerenv-637175"
I1124 02:29:37.646742 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1124 02:29:37.646747 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I1124 02:29:37.646808 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I1124 02:29:37.646772 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1124 02:29:37.646954 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I1124 02:29:37.648707 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I1124 02:29:37.649852 1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
I1124 02:29:37.652661 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1124 02:29:37.655067 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1124 02:29:37.658621 1 shared_informer.go:356] "Caches are synced" controller="service account"
I1124 02:29:37.666477 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1124 02:29:37.668733 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
==> kube-proxy [e03a1aea5f99b62409718f43f9bec98cc74fec578045f89b20f6d50d4e37c7c7] <==
I1124 02:29:39.280134 1 server_linux.go:53] "Using iptables proxy"
I1124 02:29:39.347519 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1124 02:29:39.448441 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1124 02:29:39.448515 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E1124 02:29:39.448655 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1124 02:29:39.471643 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 02:29:39.471698 1 server_linux.go:132] "Using iptables Proxier"
I1124 02:29:39.476884 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1124 02:29:39.477351 1 server.go:527] "Version info" version="v1.34.1"
I1124 02:29:39.477386 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 02:29:39.479486 1 config.go:200] "Starting service config controller"
I1124 02:29:39.479516 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1124 02:29:39.479539 1 config.go:106] "Starting endpoint slice config controller"
I1124 02:29:39.479545 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1124 02:29:39.479560 1 config.go:403] "Starting serviceCIDR config controller"
I1124 02:29:39.479566 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1124 02:29:39.480012 1 config.go:309] "Starting node config controller"
I1124 02:29:39.480034 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1124 02:29:39.480043 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1124 02:29:39.580567 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1124 02:29:39.580602 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1124 02:29:39.580629 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-scheduler [69e56949f786025b7bc51d1a1c6dcf75d92a9f45acc0b4a87fbe7805d154e4ae] <==
E1124 02:29:30.670746 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1124 02:29:30.670614 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1124 02:29:30.670664 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1124 02:29:30.670733 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1124 02:29:30.670634 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1124 02:29:30.670759 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1124 02:29:30.670819 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1124 02:29:30.670843 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1124 02:29:30.670855 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1124 02:29:30.670942 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1124 02:29:30.671011 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1124 02:29:30.671025 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1124 02:29:30.671056 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1124 02:29:30.671088 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1124 02:29:31.535897 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1124 02:29:31.600600 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1124 02:29:31.601593 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1124 02:29:31.634038 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1124 02:29:31.659370 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1124 02:29:31.670013 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1124 02:29:31.725201 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1124 02:29:31.740275 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1124 02:29:31.777813 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1124 02:29:32.001198 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
I1124 02:29:35.168174 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: E1124 02:29:34.312319 1441 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-dockerenv-637175\" already exists" pod="kube-system/kube-apiserver-dockerenv-637175"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: E1124 02:29:34.312319 1441 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-dockerenv-637175\" already exists" pod="kube-system/kube-scheduler-dockerenv-637175"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: E1124 02:29:34.312660 1441 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-dockerenv-637175\" already exists" pod="kube-system/etcd-dockerenv-637175"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: E1124 02:29:34.313024 1441 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-dockerenv-637175\" already exists" pod="kube-system/kube-controller-manager-dockerenv-637175"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: I1124 02:29:34.336553 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-dockerenv-637175" podStartSLOduration=1.336529805 podStartE2EDuration="1.336529805s" podCreationTimestamp="2025-11-24 02:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:34.336379557 +0000 UTC m=+1.133095955" watchObservedRunningTime="2025-11-24 02:29:34.336529805 +0000 UTC m=+1.133246228"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: I1124 02:29:34.336730 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-dockerenv-637175" podStartSLOduration=1.336721521 podStartE2EDuration="1.336721521s" podCreationTimestamp="2025-11-24 02:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:34.328283609 +0000 UTC m=+1.125000015" watchObservedRunningTime="2025-11-24 02:29:34.336721521 +0000 UTC m=+1.133437900"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: I1124 02:29:34.351708 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-dockerenv-637175" podStartSLOduration=1.351688893 podStartE2EDuration="1.351688893s" podCreationTimestamp="2025-11-24 02:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:34.351360495 +0000 UTC m=+1.148076899" watchObservedRunningTime="2025-11-24 02:29:34.351688893 +0000 UTC m=+1.148405262"
Nov 24 02:29:34 dockerenv-637175 kubelet[1441]: I1124 02:29:34.360233 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-dockerenv-637175" podStartSLOduration=1.3602142640000001 podStartE2EDuration="1.360214264s" podCreationTimestamp="2025-11-24 02:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:34.36017886 +0000 UTC m=+1.156895261" watchObservedRunningTime="2025-11-24 02:29:34.360214264 +0000 UTC m=+1.156930643"
Nov 24 02:29:37 dockerenv-637175 kubelet[1441]: I1124 02:29:37.613236 1441 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 24 02:29:37 dockerenv-637175 kubelet[1441]: I1124 02:29:37.613958 1441 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808454 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/38dc2084-ce55-42e6-874c-95d25d66bdf5-cni-cfg\") pod \"kindnet-bvkb6\" (UID: \"38dc2084-ce55-42e6-874c-95d25d66bdf5\") " pod="kube-system/kindnet-bvkb6"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808509 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/e5e682f4-e767-4532-8378-ed722b8c1d26-kube-proxy\") pod \"kube-proxy-l4bqr\" (UID: \"e5e682f4-e767-4532-8378-ed722b8c1d26\") " pod="kube-system/kube-proxy-l4bqr"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808528 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/38dc2084-ce55-42e6-874c-95d25d66bdf5-lib-modules\") pod \"kindnet-bvkb6\" (UID: \"38dc2084-ce55-42e6-874c-95d25d66bdf5\") " pod="kube-system/kindnet-bvkb6"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808547 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jjsv\" (UniqueName: \"kubernetes.io/projected/38dc2084-ce55-42e6-874c-95d25d66bdf5-kube-api-access-2jjsv\") pod \"kindnet-bvkb6\" (UID: \"38dc2084-ce55-42e6-874c-95d25d66bdf5\") " pod="kube-system/kindnet-bvkb6"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808595 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/38dc2084-ce55-42e6-874c-95d25d66bdf5-xtables-lock\") pod \"kindnet-bvkb6\" (UID: \"38dc2084-ce55-42e6-874c-95d25d66bdf5\") " pod="kube-system/kindnet-bvkb6"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808623 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mhbq\" (UniqueName: \"kubernetes.io/projected/e5e682f4-e767-4532-8378-ed722b8c1d26-kube-api-access-6mhbq\") pod \"kube-proxy-l4bqr\" (UID: \"e5e682f4-e767-4532-8378-ed722b8c1d26\") " pod="kube-system/kube-proxy-l4bqr"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808664 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/e5e682f4-e767-4532-8378-ed722b8c1d26-xtables-lock\") pod \"kube-proxy-l4bqr\" (UID: \"e5e682f4-e767-4532-8378-ed722b8c1d26\") " pod="kube-system/kube-proxy-l4bqr"
Nov 24 02:29:38 dockerenv-637175 kubelet[1441]: I1124 02:29:38.808706 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e5e682f4-e767-4532-8378-ed722b8c1d26-lib-modules\") pod \"kube-proxy-l4bqr\" (UID: \"e5e682f4-e767-4532-8378-ed722b8c1d26\") " pod="kube-system/kube-proxy-l4bqr"
Nov 24 02:29:40 dockerenv-637175 kubelet[1441]: I1124 02:29:40.330149 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-bvkb6" podStartSLOduration=2.330119176 podStartE2EDuration="2.330119176s" podCreationTimestamp="2025-11-24 02:29:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:40.329975899 +0000 UTC m=+7.126692300" watchObservedRunningTime="2025-11-24 02:29:40.330119176 +0000 UTC m=+7.126835561"
Nov 24 02:29:40 dockerenv-637175 kubelet[1441]: I1124 02:29:40.330286 1441 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-l4bqr" podStartSLOduration=2.330278298 podStartE2EDuration="2.330278298s" podCreationTimestamp="2025-11-24 02:29:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 02:29:39.331281387 +0000 UTC m=+6.127997792" watchObservedRunningTime="2025-11-24 02:29:40.330278298 +0000 UTC m=+7.126994682"
Nov 24 02:29:49 dockerenv-637175 kubelet[1441]: I1124 02:29:49.975158 1441 kubelet_node_status.go:439] "Fast updating node status as it just became ready"
Nov 24 02:29:50 dockerenv-637175 kubelet[1441]: I1124 02:29:50.092729 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/67fa6609-c28d-4748-805a-f89fcd6ca0ef-tmp\") pod \"storage-provisioner\" (UID: \"67fa6609-c28d-4748-805a-f89fcd6ca0ef\") " pod="kube-system/storage-provisioner"
Nov 24 02:29:50 dockerenv-637175 kubelet[1441]: I1124 02:29:50.092800 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f152ae3-917b-4ea0-a95e-941584de5e5a-config-volume\") pod \"coredns-66bc5c9577-6gf5f\" (UID: \"6f152ae3-917b-4ea0-a95e-941584de5e5a\") " pod="kube-system/coredns-66bc5c9577-6gf5f"
Nov 24 02:29:50 dockerenv-637175 kubelet[1441]: I1124 02:29:50.092896 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwhgq\" (UniqueName: \"kubernetes.io/projected/67fa6609-c28d-4748-805a-f89fcd6ca0ef-kube-api-access-gwhgq\") pod \"storage-provisioner\" (UID: \"67fa6609-c28d-4748-805a-f89fcd6ca0ef\") " pod="kube-system/storage-provisioner"
Nov 24 02:29:50 dockerenv-637175 kubelet[1441]: I1124 02:29:50.092929 1441 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7frd5\" (UniqueName: \"kubernetes.io/projected/6f152ae3-917b-4ea0-a95e-941584de5e5a-kube-api-access-7frd5\") pod \"coredns-66bc5c9577-6gf5f\" (UID: \"6f152ae3-917b-4ea0-a95e-941584de5e5a\") " pod="kube-system/coredns-66bc5c9577-6gf5f"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p dockerenv-637175 -n dockerenv-637175
helpers_test.go:269: (dbg) Run: kubectl --context dockerenv-637175 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: coredns-66bc5c9577-6gf5f storage-provisioner
helpers_test.go:282: ======> post-mortem[TestDockerEnvContainerd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context dockerenv-637175 describe pod coredns-66bc5c9577-6gf5f storage-provisioner
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context dockerenv-637175 describe pod coredns-66bc5c9577-6gf5f storage-provisioner: exit status 1 (58.152974ms)
** stderr **
Error from server (NotFound): pods "coredns-66bc5c9577-6gf5f" not found
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:287: kubectl --context dockerenv-637175 describe pod coredns-66bc5c9577-6gf5f storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "dockerenv-637175" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p dockerenv-637175
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p dockerenv-637175: (2.380158313s)
--- FAIL: TestDockerEnvContainerd (42.64s)
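
To rerun only this test against a local build, a minimal sketch; this assumes a minikube source checkout with the out/minikube-linux-amd64 binary already built (e.g. via make), and the -timeout value is illustrative:

    # Re-run just TestDockerEnvContainerd from the integration suite
    # using standard Go test flags (-run, -v, -timeout).
    go test ./test/integration -v -run TestDockerEnvContainerd -timeout 30m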