=== RUN TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run: out/minikube-linux-amd64 status -p pause-327044 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p pause-327044 --output=json --layout=cluster: exit status 2 (356.34589ms)
-- stdout --
{"Name":"pause-327044","StatusCode":200,"StatusName":"OK","Step":"Done","StepDetail":"* Paused 0 containers in: kube-system, kubernetes-dashboard, istio-operator","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-327044","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":200,"StatusName":"OK"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}
-- /stdout --
pause_test.go:200: incorrect status code: 200
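For reference, the failing assertion compares the top-level StatusCode in the JSON above with the code expected after a successful pause. A minimal sketch of that check (not minikube's actual test code; the struct mirrors only the fields visible in the JSON above, and 418 as the expected "Paused" code is an assumption):

package main

import (
	"encoding/json"
	"fmt"
)

// clusterStatus mirrors a subset of the payload printed by
// `minikube status --output=json --layout=cluster` in the stdout above.
type clusterStatus struct {
	Name       string `json:"Name"`
	StatusCode int    `json:"StatusCode"`
	StatusName string `json:"StatusName"`
	StepDetail string `json:"StepDetail"`
}

func main() {
	// Trimmed copy of the payload captured in this log.
	raw := []byte(`{"Name":"pause-327044","StatusCode":200,"StatusName":"OK","StepDetail":"* Paused 0 containers in: kube-system, kubernetes-dashboard, istio-operator"}`)
	var st clusterStatus
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	// The cluster reports 200/OK, and StepDetail records that the pause
	// touched 0 containers; a check expecting the paused code fails.
	const pausedCode = 418 // assumed expected value after `minikube pause`
	if st.StatusCode != pausedCode {
		fmt.Printf("incorrect status code: %d (%s)\n", st.StatusCode, st.StatusName)
	}
}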
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestPause/serial/VerifyStatus]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:239: ======> post-mortem[TestPause/serial/VerifyStatus]: docker inspect <======
helpers_test.go:240: (dbg) Run: docker inspect pause-327044
helpers_test.go:244: (dbg) docker inspect pause-327044:
-- stdout --
[
{
"Id": "8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85",
"Created": "2025-12-28T06:57:13.286602091Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 768084,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-12-28T06:57:13.317062598Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:8b8cccb9afb2a57c3d011fcf33e0403b1551aa7036e30b12a395646869801935",
"ResolvConfPath": "/var/lib/docker/containers/8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85/hostname",
"HostsPath": "/var/lib/docker/containers/8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85/hosts",
"LogPath": "/var/lib/docker/containers/8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85/8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85-json.log",
"Name": "/pause-327044",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"pause-327044:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "pause-327044",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "8dbcc5cd04c1698e2aa465643a77450ebc515606211717822484e514f6ad6e85",
"LowerDir": "/var/lib/docker/overlay2/d22d2c8cf5baddd3495c083432fea6f2cd6669acffc58cfdc311523f6dece243-init/diff:/var/lib/docker/overlay2/dfc7a4c580b7be84b0f83410b7478c6f6aa3f00b996556623ab9129bd6527422/diff",
"MergedDir": "/var/lib/docker/overlay2/d22d2c8cf5baddd3495c083432fea6f2cd6669acffc58cfdc311523f6dece243/merged",
"UpperDir": "/var/lib/docker/overlay2/d22d2c8cf5baddd3495c083432fea6f2cd6669acffc58cfdc311523f6dece243/diff",
"WorkDir": "/var/lib/docker/overlay2/d22d2c8cf5baddd3495c083432fea6f2cd6669acffc58cfdc311523f6dece243/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "pause-327044",
"Source": "/var/lib/docker/volumes/pause-327044/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "pause-327044",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "pause-327044",
"name.minikube.sigs.k8s.io": "pause-327044",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "ceddb6ac6b22e2194c34fac4e69584dd303650039a08d5bc95a9002fbe1001bc",
"SandboxKey": "/var/run/docker/netns/ceddb6ac6b22",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33030"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33031"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33034"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33032"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33033"
}
]
},
"Networks": {
"pause-327044": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "70ac247aad7b5af7fbc54defc2d96e35f5cdf496bd44affc3254df0702f730ec",
"EndpointID": "ae7a7a0dedecddb084c50052ed9d01885b7caff17a5590578b94a0cca1e0ed86",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"MacAddress": "0a:5b:04:c7:36:91",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"pause-327044",
"8dbcc5cd04c1"
]
}
}
}
}
]
-- /stdout --
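The same state can be checked programmatically instead of reading the dump above. A small sketch (not part of the test suite; struct fields mirror the JSON printed above) that decodes `docker inspect` and reports the outer container's state; consistent with the dump, it stays running and unpaused, since `minikube pause` is meant to freeze workloads inside the node rather than the Docker container itself:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// inspectEntry mirrors the part of `docker inspect` output that the
// post-mortem above cares about.
type inspectEntry struct {
	Name  string `json:"Name"`
	State struct {
		Status  string `json:"Status"`
		Running bool   `json:"Running"`
		Paused  bool   `json:"Paused"`
	} `json:"State"`
}

func main() {
	// Same command the post-mortem helper runs; profile name from this log.
	out, err := exec.Command("docker", "inspect", "pause-327044").Output()
	if err != nil {
		panic(err)
	}
	// `docker inspect` always returns a JSON array, one entry per object.
	var entries []inspectEntry
	if err := json.Unmarshal(out, &entries); err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Printf("%s: status=%s running=%v paused=%v\n",
			e.Name, e.State.Status, e.State.Running, e.State.Paused)
	}
}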
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p pause-327044 -n pause-327044
helpers_test.go:248: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p pause-327044 -n pause-327044: exit status 2 (345.873039ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:248: status error: exit status 2 (may be ok)
helpers_test.go:253: <<< TestPause/serial/VerifyStatus FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestPause/serial/VerifyStatus]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p pause-327044 logs -n 25
helpers_test.go:261: TestPause/serial/VerifyStatus logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cert-options-948332 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-948332 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ delete │ -p cert-options-948332 │ cert-options-948332 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ start │ -p running-upgrade-397849 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ running-upgrade-397849 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ start │ -p test-preload-517921 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:56 UTC │
│ delete │ -p running-upgrade-397849 │ running-upgrade-397849 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ start │ -p kubernetes-upgrade-926675 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-926675 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:56 UTC │
│ ssh │ force-systemd-env-455558 ssh cat /etc/containerd/config.toml │ force-systemd-env-455558 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ delete │ -p force-systemd-env-455558 │ force-systemd-env-455558 │ jenkins │ v1.37.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:55 UTC │
│ start │ -p missing-upgrade-317261 --memory=3072 --driver=docker --container-runtime=containerd │ missing-upgrade-317261 │ jenkins │ v1.35.0 │ 28 Dec 25 06:55 UTC │ 28 Dec 25 06:56 UTC │
│ stop │ -p kubernetes-upgrade-926675 --alsologtostderr │ kubernetes-upgrade-926675 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ 28 Dec 25 06:56 UTC │
│ start │ -p kubernetes-upgrade-926675 --memory=3072 --kubernetes-version=v1.35.0 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-926675 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ │
│ start │ -p missing-upgrade-317261 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ missing-upgrade-317261 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ 28 Dec 25 06:57 UTC │
│ image │ test-preload-517921 image pull ghcr.io/medyagh/image-mirrors/busybox:latest │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ 28 Dec 25 06:56 UTC │
│ stop │ -p test-preload-517921 │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ 28 Dec 25 06:56 UTC │
│ start │ -p test-preload-517921 --preload=true --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=containerd │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:56 UTC │ 28 Dec 25 06:57 UTC │
│ delete │ -p missing-upgrade-317261 │ missing-upgrade-317261 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ start │ -p pause-327044 --memory=3072 --install-addons=false --wait=all --driver=docker --container-runtime=containerd │ pause-327044 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ image │ test-preload-517921 image list │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ delete │ -p test-preload-517921 │ test-preload-517921 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ start │ -p NoKubernetes-875069 --no-kubernetes --kubernetes-version=v1.28.0 --driver=docker --container-runtime=containerd │ NoKubernetes-875069 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ │
│ start │ -p NoKubernetes-875069 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-875069 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ start │ -p pause-327044 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ pause-327044 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ start │ -p NoKubernetes-875069 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-875069 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ pause │ -p pause-327044 --alsologtostderr -v=5 │ pause-327044 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ 28 Dec 25 06:57 UTC │
│ delete │ -p NoKubernetes-875069 │ NoKubernetes-875069 │ jenkins │ v1.37.0 │ 28 Dec 25 06:57 UTC │ │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/28 06:57:50
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1228 06:57:50.761013 775090 out.go:360] Setting OutFile to fd 1 ...
I1228 06:57:50.761321 775090 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1228 06:57:50.761332 775090 out.go:374] Setting ErrFile to fd 2...
I1228 06:57:50.761336 775090 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1228 06:57:50.761523 775090 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22352-552174/.minikube/bin
I1228 06:57:50.761959 775090 out.go:368] Setting JSON to false
I1228 06:57:50.763243 775090 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":13215,"bootTime":1766891856,"procs":337,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1228 06:57:50.763308 775090 start.go:143] virtualization: kvm guest
I1228 06:57:50.765858 775090 out.go:179] * [NoKubernetes-875069] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1228 06:57:50.767106 775090 out.go:179] - MINIKUBE_LOCATION=22352
I1228 06:57:50.767134 775090 notify.go:221] Checking for updates...
I1228 06:57:50.769337 775090 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1228 06:57:50.770644 775090 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22352-552174/kubeconfig
I1228 06:57:50.771741 775090 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22352-552174/.minikube
I1228 06:57:50.772969 775090 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1228 06:57:50.774204 775090 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1228 06:57:50.775737 775090 config.go:182] Loaded profile config "NoKubernetes-875069": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0
I1228 06:57:50.776286 775090 start.go:1905] No Kubernetes flag is set, setting Kubernetes version to v0.0.0
I1228 06:57:50.776373 775090 start.go:1810] No Kubernetes version set for minikube, setting Kubernetes version to v0.0.0
I1228 06:57:50.776420 775090 driver.go:422] Setting default libvirt URI to qemu:///system
I1228 06:57:50.803120 775090 docker.go:124] docker version: linux-29.1.3:Docker Engine - Community
I1228 06:57:50.803284 775090 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1228 06:57:50.870845 775090 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:4 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:77 OomKillDisable:false NGoroutines:85 SystemTime:2025-12-28 06:57:50.859338803 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1045-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:dea7da592f5d1d2b7755e3a161be07f43fad8f75 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v5.0.0] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.6] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1228 06:57:50.870948 775090 docker.go:319] overlay module found
I1228 06:57:50.874191 775090 out.go:179] * Using the docker driver based on existing profile
I1228 06:57:50.875370 775090 start.go:309] selected driver: docker
I1228 06:57:50.875387 775090 start.go:928] validating driver "docker" against &{Name:NoKubernetes-875069 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v0.0.0 ClusterName:NoKubernetes-875069 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1228 06:57:50.875477 775090 start.go:939] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1228 06:57:50.876051 775090 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1228 06:57:50.937018 775090 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:4 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:77 OomKillDisable:false NGoroutines:85 SystemTime:2025-12-28 06:57:50.92713464 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1045-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:dea7da592f5d1d2b7755e3a161be07f43fad8f75 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v5.0.0] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.6] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1228 06:57:50.937117 775090 start.go:1905] No Kubernetes flag is set, setting Kubernetes version to v0.0.0
I1228 06:57:50.937324 775090 cni.go:84] Creating CNI manager for ""
I1228 06:57:50.937411 775090 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1228 06:57:50.937437 775090 start.go:1905] No Kubernetes flag is set, setting Kubernetes version to v0.0.0
I1228 06:57:50.937482 775090 start.go:353] cluster config:
{Name:NoKubernetes-875069 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v0.0.0 ClusterName:NoKubernetes-875069 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v0.0.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1228 06:57:50.939644 775090 out.go:179] * Starting minikube without Kubernetes in cluster NoKubernetes-875069
I1228 06:57:50.940660 775090 cache.go:134] Beginning downloading kic base image for docker with containerd
I1228 06:57:50.941659 775090 out.go:179] * Pulling base image v0.0.48-1766884053-22351 ...
I1228 06:57:50.942586 775090 cache.go:59] Skipping Kubernetes image caching due to --no-kubernetes flag
I1228 06:57:50.942618 775090 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 in local docker daemon
I1228 06:57:50.942753 775090 profile.go:143] Saving config to /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/NoKubernetes-875069/config.json ...
I1228 06:57:50.964905 775090 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 in local docker daemon, skipping pull
I1228 06:57:50.964929 775090 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 exists in daemon, skipping load
I1228 06:57:50.964943 775090 cache.go:243] Successfully downloaded all kic artifacts
I1228 06:57:50.965006 775090 start.go:360] acquireMachinesLock for NoKubernetes-875069: {Name:mkea1e091d863ed14414015f3fb5b2b4c2c65fb0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1228 06:57:50.965074 775090 start.go:364] duration metric: took 42.527µs to acquireMachinesLock for "NoKubernetes-875069"
I1228 06:57:50.965109 775090 start.go:96] Skipping create...Using existing machine configuration
I1228 06:57:50.965118 775090 fix.go:54] fixHost starting:
I1228 06:57:50.965432 775090 cli_runner.go:164] Run: docker container inspect NoKubernetes-875069 --format={{.State.Status}}
I1228 06:57:50.986131 775090 fix.go:112] recreateIfNeeded on NoKubernetes-875069: state=Running err=<nil>
W1228 06:57:50.986162 775090 fix.go:138] unexpected machine state, will restart: <nil>
I1228 06:57:50.370329 773850 cli_runner.go:164] Run: docker network inspect pause-327044 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1228 06:57:50.395150 773850 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1228 06:57:50.400564 773850 kubeadm.go:884] updating cluster {Name:pause-327044 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:pause-327044 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I1228 06:57:50.400743 773850 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime containerd
I1228 06:57:50.400814 773850 ssh_runner.go:195] Run: sudo crictl images --output json
I1228 06:57:50.433432 773850 containerd.go:635] all images are preloaded for containerd runtime.
I1228 06:57:50.433458 773850 containerd.go:542] Images already preloaded, skipping extraction
I1228 06:57:50.433513 773850 ssh_runner.go:195] Run: sudo crictl images --output json
I1228 06:57:50.463539 773850 containerd.go:635] all images are preloaded for containerd runtime.
I1228 06:57:50.463583 773850 cache_images.go:86] Images are preloaded, skipping loading
I1228 06:57:50.463594 773850 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.35.0 containerd true true} ...
I1228 06:57:50.463738 773850 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=pause-327044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:pause-327044 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1228 06:57:50.463809 773850 ssh_runner.go:195] Run: sudo crictl --timeout=10s info
I1228 06:57:50.493319 773850 cni.go:84] Creating CNI manager for ""
I1228 06:57:50.493344 773850 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1228 06:57:50.493365 773850 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1228 06:57:50.493395 773850 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:pause-327044 NodeName:pause-327044 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1228 06:57:50.493575 773850 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "pause-327044"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1228 06:57:50.493657 773850 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I1228 06:57:50.501819 773850 binaries.go:51] Found k8s binaries, skipping transfer
I1228 06:57:50.501881 773850 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1228 06:57:50.510614 773850 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (316 bytes)
I1228 06:57:50.524052 773850 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1228 06:57:50.536973 773850 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2224 bytes)
I1228 06:57:50.549733 773850 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1228 06:57:50.554096 773850 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1228 06:57:50.698387 773850 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1228 06:57:50.711928 773850 certs.go:69] Setting up /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044 for IP: 192.168.85.2
I1228 06:57:50.711945 773850 certs.go:195] generating shared ca certs ...
I1228 06:57:50.711959 773850 certs.go:227] acquiring lock for ca certs: {Name:mkf3a34076ce55c96c0ca7e803bd863f5c48e3ff Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1228 06:57:50.712119 773850 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22352-552174/.minikube/ca.key
I1228 06:57:50.712170 773850 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22352-552174/.minikube/proxy-client-ca.key
I1228 06:57:50.712182 773850 certs.go:257] generating profile certs ...
I1228 06:57:50.712324 773850 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/client.key
I1228 06:57:50.712403 773850 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/apiserver.key.2100f19c
I1228 06:57:50.712460 773850 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/proxy-client.key
I1228 06:57:50.712598 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/555878.pem (1338 bytes)
W1228 06:57:50.712643 773850 certs.go:480] ignoring /home/jenkins/minikube-integration/22352-552174/.minikube/certs/555878_empty.pem, impossibly tiny 0 bytes
I1228 06:57:50.712655 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca-key.pem (1675 bytes)
I1228 06:57:50.712692 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem (1078 bytes)
I1228 06:57:50.712725 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/cert.pem (1123 bytes)
I1228 06:57:50.712754 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/key.pem (1675 bytes)
I1228 06:57:50.712829 773850 certs.go:484] found cert: /home/jenkins/minikube-integration/22352-552174/.minikube/files/etc/ssl/certs/5558782.pem (1708 bytes)
I1228 06:57:50.714355 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1228 06:57:50.735185 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1228 06:57:50.755501 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1228 06:57:50.777833 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1228 06:57:50.798449 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I1228 06:57:50.819374 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1228 06:57:50.843989 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1228 06:57:50.866292 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1228 06:57:50.887390 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/certs/555878.pem --> /usr/share/ca-certificates/555878.pem (1338 bytes)
I1228 06:57:50.910342 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/files/etc/ssl/certs/5558782.pem --> /usr/share/ca-certificates/5558782.pem (1708 bytes)
I1228 06:57:50.931315 773850 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1228 06:57:50.950225 773850 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I1228 06:57:50.964780 773850 ssh_runner.go:195] Run: openssl version
I1228 06:57:50.972773 773850 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/555878.pem
I1228 06:57:50.982188 773850 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/555878.pem /etc/ssl/certs/555878.pem
I1228 06:57:50.991052 773850 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/555878.pem
I1228 06:57:50.995673 773850 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 28 06:34 /usr/share/ca-certificates/555878.pem
I1228 06:57:50.995732 773850 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/555878.pem
I1228 06:57:51.037862 773850 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I1228 06:57:51.046579 773850 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/5558782.pem
I1228 06:57:51.054705 773850 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/5558782.pem /etc/ssl/certs/5558782.pem
I1228 06:57:51.062790 773850 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/5558782.pem
I1228 06:57:51.067365 773850 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 28 06:34 /usr/share/ca-certificates/5558782.pem
I1228 06:57:51.067425 773850 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/5558782.pem
I1228 06:57:51.103390 773850 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I1228 06:57:51.111306 773850 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I1228 06:57:51.125809 773850 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I1228 06:57:51.136455 773850 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1228 06:57:51.141202 773850 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 28 06:29 /usr/share/ca-certificates/minikubeCA.pem
I1228 06:57:51.141269 773850 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1228 06:57:51.179568 773850 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I1228 06:57:51.188165 773850 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1228 06:57:51.192137 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1228 06:57:51.229914 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1228 06:57:51.265773 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1228 06:57:51.305294 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1228 06:57:51.345412 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1228 06:57:51.386157 773850 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1228 06:57:51.421533 773850 kubeadm.go:401] StartCluster: {Name:pause-327044 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766884053-22351@sha256:2a274089182002e4ae2c5a05f988da35736dc812d4e6b2b8d1dd2036cb8212b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:pause-327044 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1228 06:57:51.421689 773850 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:51.447113 773850 cri.go:83] list returned 14 containers
I1228 06:57:51.447186 773850 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1228 06:57:51.455752 773850 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1228 06:57:51.455772 773850 kubeadm.go:598] restartPrimaryControlPlane start ...
I1228 06:57:51.455830 773850 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1228 06:57:51.464972 773850 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1228 06:57:51.466067 773850 kubeconfig.go:125] found "pause-327044" server: "https://192.168.85.2:8443"
I1228 06:57:51.467593 773850 kapi.go:59] client config for pause-327044: &rest.Config{Host:"https://192.168.85.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/client.crt", KeyFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/client.key", CAFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x2780200), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I1228 06:57:51.468045 773850 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=true
I1228 06:57:51.468061 773850 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I1228 06:57:51.468066 773850 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I1228 06:57:51.468070 773850 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I1228 06:57:51.468076 773850 envvar.go:172] "Feature gate default state" feature="InOrderInformersBatchProcess" enabled=true
I1228 06:57:51.468082 773850 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=true
I1228 06:57:51.468480 773850 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1228 06:57:51.476134 773850 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.85.2
I1228 06:57:51.476159 773850 kubeadm.go:602] duration metric: took 20.381077ms to restartPrimaryControlPlane
I1228 06:57:51.476167 773850 kubeadm.go:403] duration metric: took 54.647366ms to StartCluster
I1228 06:57:51.476181 773850 settings.go:142] acquiring lock: {Name:mk0a3f928fed4bf13f8897ea15768d1c7b315118 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1228 06:57:51.476260 773850 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22352-552174/kubeconfig
I1228 06:57:51.477466 773850 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22352-552174/kubeconfig: {Name:mk8eb66c78260a0013ac235827a08f86055faf33 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1228 06:57:51.477676 773850 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1228 06:57:51.477747 773850 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1228 06:57:51.477982 773850 config.go:182] Loaded profile config "pause-327044": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0
I1228 06:57:51.479929 773850 out.go:179] * Verifying Kubernetes components...
I1228 06:57:51.479933 773850 out.go:179] * Enabled addons:
I1228 06:57:50.989261 775090 out.go:252] * Updating the running docker "NoKubernetes-875069" container ...
I1228 06:57:50.989294 775090 machine.go:94] provisionDockerMachine start ...
I1228 06:57:50.989366 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.008794 775090 main.go:144] libmachine: Using SSH client type: native
I1228 06:57:51.009076 775090 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 127.0.0.1 33035 <nil> <nil>}
I1228 06:57:51.009091 775090 main.go:144] libmachine: About to run SSH command:
hostname
I1228 06:57:51.135536 775090 main.go:144] libmachine: SSH cmd err, output: <nil>: NoKubernetes-875069
I1228 06:57:51.135569 775090 ubuntu.go:182] provisioning hostname "NoKubernetes-875069"
I1228 06:57:51.135646 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.155972 775090 main.go:144] libmachine: Using SSH client type: native
I1228 06:57:51.156337 775090 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 127.0.0.1 33035 <nil> <nil>}
I1228 06:57:51.156358 775090 main.go:144] libmachine: About to run SSH command:
sudo hostname NoKubernetes-875069 && echo "NoKubernetes-875069" | sudo tee /etc/hostname
I1228 06:57:51.291632 775090 main.go:144] libmachine: SSH cmd err, output: <nil>: NoKubernetes-875069
I1228 06:57:51.291746 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.310947 775090 main.go:144] libmachine: Using SSH client type: native
I1228 06:57:51.311206 775090 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 127.0.0.1 33035 <nil> <nil>}
I1228 06:57:51.311246 775090 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sNoKubernetes-875069' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 NoKubernetes-875069/g' /etc/hosts;
else
echo '127.0.1.1 NoKubernetes-875069' | sudo tee -a /etc/hosts;
fi
fi
I1228 06:57:51.439765 775090 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1228 06:57:51.439802 775090 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22352-552174/.minikube CaCertPath:/home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22352-552174/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22352-552174/.minikube}
I1228 06:57:51.439830 775090 ubuntu.go:190] setting up certificates
I1228 06:57:51.439854 775090 provision.go:84] configureAuth start
I1228 06:57:51.439941 775090 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" NoKubernetes-875069
I1228 06:57:51.460079 775090 provision.go:143] copyHostCerts
I1228 06:57:51.460120 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22352-552174/.minikube/ca.pem
I1228 06:57:51.460155 775090 exec_runner.go:144] found /home/jenkins/minikube-integration/22352-552174/.minikube/ca.pem, removing ...
I1228 06:57:51.460175 775090 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22352-552174/.minikube/ca.pem
I1228 06:57:51.460285 775090 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22352-552174/.minikube/ca.pem (1078 bytes)
I1228 06:57:51.460422 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22352-552174/.minikube/cert.pem
I1228 06:57:51.460454 775090 exec_runner.go:144] found /home/jenkins/minikube-integration/22352-552174/.minikube/cert.pem, removing ...
I1228 06:57:51.460464 775090 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22352-552174/.minikube/cert.pem
I1228 06:57:51.460504 775090 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22352-552174/.minikube/cert.pem (1123 bytes)
I1228 06:57:51.460595 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22352-552174/.minikube/key.pem
I1228 06:57:51.460624 775090 exec_runner.go:144] found /home/jenkins/minikube-integration/22352-552174/.minikube/key.pem, removing ...
I1228 06:57:51.460638 775090 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22352-552174/.minikube/key.pem
I1228 06:57:51.460675 775090 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22352-552174/.minikube/key.pem (1675 bytes)
I1228 06:57:51.460774 775090 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22352-552174/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca-key.pem org=jenkins.NoKubernetes-875069 san=[127.0.0.1 192.168.76.2 NoKubernetes-875069 localhost minikube]
I1228 06:57:51.543683 775090 provision.go:177] copyRemoteCerts
I1228 06:57:51.543749 775090 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1228 06:57:51.543800 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.562552 775090 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33035 SSHKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/NoKubernetes-875069/id_rsa Username:docker}
I1228 06:57:51.657538 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1228 06:57:51.657607 775090 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1228 06:57:51.676982 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/machines/server.pem -> /etc/docker/server.pem
I1228 06:57:51.677049 775090 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I1228 06:57:51.697345 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1228 06:57:51.697405 775090 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1228 06:57:51.716961 775090 provision.go:87] duration metric: took 277.082563ms to configureAuth
I1228 06:57:51.716985 775090 ubuntu.go:206] setting minikube options for container-runtime
I1228 06:57:51.717127 775090 config.go:182] Loaded profile config "NoKubernetes-875069": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v0.0.0
I1228 06:57:51.717145 775090 machine.go:97] duration metric: took 727.838914ms to provisionDockerMachine
I1228 06:57:51.717159 775090 start.go:293] postStartSetup for "NoKubernetes-875069" (driver="docker")
I1228 06:57:51.717170 775090 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1228 06:57:51.717211 775090 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1228 06:57:51.717291 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.734810 775090 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33035 SSHKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/NoKubernetes-875069/id_rsa Username:docker}
I1228 06:57:51.827864 775090 ssh_runner.go:195] Run: cat /etc/os-release
I1228 06:57:51.832403 775090 main.go:144] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1228 06:57:51.832434 775090 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1228 06:57:51.832448 775090 filesync.go:126] Scanning /home/jenkins/minikube-integration/22352-552174/.minikube/addons for local assets ...
I1228 06:57:51.832535 775090 filesync.go:126] Scanning /home/jenkins/minikube-integration/22352-552174/.minikube/files for local assets ...
I1228 06:57:51.832657 775090 filesync.go:149] local asset: /home/jenkins/minikube-integration/22352-552174/.minikube/files/etc/ssl/certs/5558782.pem -> 5558782.pem in /etc/ssl/certs
I1228 06:57:51.832671 775090 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22352-552174/.minikube/files/etc/ssl/certs/5558782.pem -> /etc/ssl/certs/5558782.pem
I1228 06:57:51.832809 775090 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1228 06:57:51.841194 775090 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22352-552174/.minikube/files/etc/ssl/certs/5558782.pem --> /etc/ssl/certs/5558782.pem (1708 bytes)
I1228 06:57:51.860021 775090 start.go:296] duration metric: took 142.846052ms for postStartSetup
I1228 06:57:51.860125 775090 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1228 06:57:51.860179 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.880069 775090 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33035 SSHKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/NoKubernetes-875069/id_rsa Username:docker}
I1228 06:57:51.973504 775090 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1228 06:57:51.979353 775090 fix.go:56] duration metric: took 1.014229386s for fixHost
I1228 06:57:51.979380 775090 start.go:83] releasing machines lock for "NoKubernetes-875069", held for 1.014291987s
I1228 06:57:51.979468 775090 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" NoKubernetes-875069
I1228 06:57:51.999065 775090 ssh_runner.go:195] Run: cat /version.json
I1228 06:57:51.999088 775090 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1228 06:57:51.999129 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:51.999160 775090 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" NoKubernetes-875069
I1228 06:57:52.021491 775090 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33035 SSHKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/NoKubernetes-875069/id_rsa Username:docker}
I1228 06:57:52.022334 775090 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33035 SSHKeyPath:/home/jenkins/minikube-integration/22352-552174/.minikube/machines/NoKubernetes-875069/id_rsa Username:docker}
I1228 06:57:52.113709 775090 ssh_runner.go:195] Run: systemctl --version
I1228 06:57:52.172508 775090 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1228 06:57:52.187680 775090 out.go:179] - Kubernetes: Stopping ...
I1228 06:57:52.188766 775090 ssh_runner.go:195] Run: sudo systemctl stop -f kubelet
I1228 06:57:52.227805 775090 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:52.249294 775090 cri.go:83] list returned 8 containers
I1228 06:57:52.249361 775090 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1228 06:57:52.263733 775090 out.go:179] - Kubernetes: Stopped
I1228 06:57:51.481186 773850 addons.go:530] duration metric: took 3.4374ms for enable addons: enabled=[]
I1228 06:57:51.481204 773850 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1228 06:57:51.608700 773850 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1228 06:57:51.622084 773850 node_ready.go:35] waiting up to 6m0s for node "pause-327044" to be "Ready" ...
I1228 06:57:51.629616 773850 node_ready.go:49] node "pause-327044" is "Ready"
I1228 06:57:51.629644 773850 node_ready.go:38] duration metric: took 7.513874ms for node "pause-327044" to be "Ready" ...
I1228 06:57:51.629660 773850 api_server.go:52] waiting for apiserver process to appear ...
I1228 06:57:51.629713 773850 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1228 06:57:51.641786 773850 api_server.go:72] duration metric: took 164.075676ms to wait for apiserver process to appear ...
I1228 06:57:51.641811 773850 api_server.go:88] waiting for apiserver healthz status ...
I1228 06:57:51.641831 773850 api_server.go:299] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1228 06:57:51.645743 773850 api_server.go:325] https://192.168.85.2:8443/healthz returned 200:
ok
I1228 06:57:51.646676 773850 api_server.go:141] control plane version: v1.35.0
I1228 06:57:51.646702 773850 api_server.go:131] duration metric: took 4.882272ms to wait for apiserver health ...
I1228 06:57:51.646712 773850 system_pods.go:43] waiting for kube-system pods to appear ...
I1228 06:57:51.649662 773850 system_pods.go:59] 7 kube-system pods found
I1228 06:57:51.649708 773850 system_pods.go:61] "coredns-7d764666f9-25xk9" [027bff7d-bef6-4d94-9cf5-0feb5f7d4c99] Running
I1228 06:57:51.649714 773850 system_pods.go:61] "etcd-pause-327044" [a83ddd5b-58e9-4407-8242-451bb7a4f2c8] Running
I1228 06:57:51.649719 773850 system_pods.go:61] "kindnet-tzx87" [b3862b54-c993-4353-bc0d-57485386eff2] Running
I1228 06:57:51.649723 773850 system_pods.go:61] "kube-apiserver-pause-327044" [b9bcb09b-f0bf-4554-9a62-5f8279ff3c81] Running
I1228 06:57:51.649732 773850 system_pods.go:61] "kube-controller-manager-pause-327044" [fdfdd570-5e85-4809-be2d-922dae6b4bb5] Running
I1228 06:57:51.649741 773850 system_pods.go:61] "kube-proxy-8zhkz" [fda10b86-5089-4bf2-a2c1-b9f38a0784c4] Running
I1228 06:57:51.649747 773850 system_pods.go:61] "kube-scheduler-pause-327044" [2c1f6f43-16ce-4b3c-a2b4-dae134ed5e0d] Running
I1228 06:57:51.649757 773850 system_pods.go:74] duration metric: took 3.038094ms to wait for pod list to return data ...
I1228 06:57:51.649768 773850 default_sa.go:34] waiting for default service account to be created ...
I1228 06:57:51.651574 773850 default_sa.go:45] found service account: "default"
I1228 06:57:51.651596 773850 default_sa.go:55] duration metric: took 1.817222ms for default service account to be created ...
I1228 06:57:51.651604 773850 system_pods.go:116] waiting for k8s-apps to be running ...
I1228 06:57:51.654189 773850 system_pods.go:86] 7 kube-system pods found
I1228 06:57:51.654229 773850 system_pods.go:89] "coredns-7d764666f9-25xk9" [027bff7d-bef6-4d94-9cf5-0feb5f7d4c99] Running
I1228 06:57:51.654237 773850 system_pods.go:89] "etcd-pause-327044" [a83ddd5b-58e9-4407-8242-451bb7a4f2c8] Running
I1228 06:57:51.654243 773850 system_pods.go:89] "kindnet-tzx87" [b3862b54-c993-4353-bc0d-57485386eff2] Running
I1228 06:57:51.654248 773850 system_pods.go:89] "kube-apiserver-pause-327044" [b9bcb09b-f0bf-4554-9a62-5f8279ff3c81] Running
I1228 06:57:51.654253 773850 system_pods.go:89] "kube-controller-manager-pause-327044" [fdfdd570-5e85-4809-be2d-922dae6b4bb5] Running
I1228 06:57:51.654257 773850 system_pods.go:89] "kube-proxy-8zhkz" [fda10b86-5089-4bf2-a2c1-b9f38a0784c4] Running
I1228 06:57:51.654262 773850 system_pods.go:89] "kube-scheduler-pause-327044" [2c1f6f43-16ce-4b3c-a2b4-dae134ed5e0d] Running
I1228 06:57:51.654271 773850 system_pods.go:126] duration metric: took 2.660349ms to wait for k8s-apps to be running ...
I1228 06:57:51.654283 773850 system_svc.go:44] waiting for kubelet service to be running ....
I1228 06:57:51.654328 773850 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1228 06:57:51.675079 773850 system_svc.go:56] duration metric: took 20.785175ms WaitForService to wait for kubelet
I1228 06:57:51.675119 773850 kubeadm.go:587] duration metric: took 197.410283ms to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1228 06:57:51.675142 773850 node_conditions.go:102] verifying NodePressure condition ...
I1228 06:57:51.677741 773850 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1228 06:57:51.677770 773850 node_conditions.go:123] node cpu capacity is 8
I1228 06:57:51.677785 773850 node_conditions.go:105] duration metric: took 2.637321ms to run NodePressure ...
I1228 06:57:51.677799 773850 start.go:242] waiting for startup goroutines ...
I1228 06:57:51.677813 773850 start.go:247] waiting for cluster config update ...
I1228 06:57:51.677827 773850 start.go:256] writing updated cluster config ...
I1228 06:57:51.678163 773850 ssh_runner.go:195] Run: rm -f paused
I1228 06:57:51.682152 773850 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1228 06:57:51.682870  773850 kapi.go:59] client config for pause-327044: &rest.Config{Host:"https://192.168.85.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/client.crt", KeyFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/profiles/pause-327044/client.key", CAFile:"/home/jenkins/minikube-integration/22352-552174/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x2780200), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I1228 06:57:51.685578 773850 pod_ready.go:83] waiting for pod "coredns-7d764666f9-25xk9" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.690122 773850 pod_ready.go:94] pod "coredns-7d764666f9-25xk9" is "Ready"
I1228 06:57:51.690147 773850 pod_ready.go:86] duration metric: took 4.546823ms for pod "coredns-7d764666f9-25xk9" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.692184 773850 pod_ready.go:83] waiting for pod "etcd-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.695656 773850 pod_ready.go:94] pod "etcd-pause-327044" is "Ready"
I1228 06:57:51.695676 773850 pod_ready.go:86] duration metric: took 3.472564ms for pod "etcd-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.697527 773850 pod_ready.go:83] waiting for pod "kube-apiserver-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.701451 773850 pod_ready.go:94] pod "kube-apiserver-pause-327044" is "Ready"
I1228 06:57:51.701474 773850 pod_ready.go:86] duration metric: took 3.923749ms for pod "kube-apiserver-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:51.703166 773850 pod_ready.go:83] waiting for pod "kube-controller-manager-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:52.086412 773850 pod_ready.go:94] pod "kube-controller-manager-pause-327044" is "Ready"
I1228 06:57:52.086440 773850 pod_ready.go:86] duration metric: took 383.255819ms for pod "kube-controller-manager-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:52.286426 773850 pod_ready.go:83] waiting for pod "kube-proxy-8zhkz" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:50.002298 757425 api_server.go:299] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1228 06:57:50.002716 757425 api_server.go:315] stopped: https://192.168.103.2:8443/healthz: Get "https://192.168.103.2:8443/healthz": dial tcp 192.168.103.2:8443: connect: connection refused
I1228 06:57:50.002815 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.026255 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.026287 757425 logs.go:282] 0 containers: []
W1228 06:57:50.026298 757425 logs.go:284] No container was found matching "kube-apiserver"
I1228 06:57:50.026358 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.044740 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.044769 757425 logs.go:282] 0 containers: []
W1228 06:57:50.044778 757425 logs.go:284] No container was found matching "etcd"
I1228 06:57:50.044837 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.062619 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.062650 757425 logs.go:282] 0 containers: []
W1228 06:57:50.062659 757425 logs.go:284] No container was found matching "coredns"
I1228 06:57:50.062702 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.080107 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.080131 757425 logs.go:282] 0 containers: []
W1228 06:57:50.080138 757425 logs.go:284] No container was found matching "kube-scheduler"
I1228 06:57:50.080178 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.097146 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.097177 757425 logs.go:282] 0 containers: []
W1228 06:57:50.097185 757425 logs.go:284] No container was found matching "kube-proxy"
I1228 06:57:50.097246 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.114299 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.114328 757425 logs.go:282] 0 containers: []
W1228 06:57:50.114338 757425 logs.go:284] No container was found matching "kube-controller-manager"
I1228 06:57:50.114391 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.131981 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.132008 757425 logs.go:282] 0 containers: []
W1228 06:57:50.132017 757425 logs.go:284] No container was found matching "kindnet"
I1228 06:57:50.132071 757425 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1228 06:57:50.149729 757425 cri.go:83] list returned 4 containers
I1228 06:57:50.149758 757425 logs.go:282] 0 containers: []
W1228 06:57:50.149767 757425 logs.go:284] No container was found matching "storage-provisioner"
I1228 06:57:50.149778 757425 logs.go:123] Gathering logs for kubelet ...
I1228 06:57:50.149793 757425 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1228 06:57:50.242920 757425 logs.go:123] Gathering logs for dmesg ...
I1228 06:57:50.242956 757425 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1228 06:57:50.259983 757425 logs.go:123] Gathering logs for describe nodes ...
I1228 06:57:50.260017 757425 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1228 06:57:50.325972 757425 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1228 06:57:50.325990 757425 logs.go:123] Gathering logs for containerd ...
I1228 06:57:50.326005 757425 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1228 06:57:50.379593 757425 logs.go:123] Gathering logs for container status ...
I1228 06:57:50.379660 757425 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1228 06:57:52.265084 775090 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1228 06:57:52.270319 775090 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1228 06:57:52.270389 775090 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1228 06:57:52.278712 775090 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1228 06:57:52.278735 775090 start.go:496] detecting cgroup driver to use...
I1228 06:57:52.278768 775090 detect.go:190] detected "systemd" cgroup driver on host os
I1228 06:57:52.278815 775090 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1228 06:57:52.295031 775090 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1228 06:57:52.307713 775090 docker.go:218] disabling cri-docker service (if available) ...
I1228 06:57:52.307764 775090 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1228 06:57:52.326250 775090 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1228 06:57:52.338874 775090 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1228 06:57:52.442186 775090 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1228 06:57:52.564064 775090 docker.go:234] disabling docker service ...
I1228 06:57:52.564152 775090 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1228 06:57:52.583379 775090 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1228 06:57:52.603996 775090 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1228 06:57:52.711464 775090 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1228 06:57:52.815512 775090 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1228 06:57:52.830211 775090 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1228 06:57:52.846966 775090 binary.go:59] Skipping Kubernetes binary download due to --no-kubernetes flag
I1228 06:57:52.847044 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1228 06:57:52.856468 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1228 06:57:52.865349 775090 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I1228 06:57:52.865401 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1228 06:57:52.874711 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1228 06:57:52.883645 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1228 06:57:52.893312 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1228 06:57:52.902623 775090 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1228 06:57:52.911201 775090 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1228 06:57:52.920608 775090 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1228 06:57:52.927976 775090 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1228 06:57:52.935523 775090 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1228 06:57:53.053603 775090 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1228 06:57:53.203953 775090 start.go:553] Will wait 60s for socket path /run/containerd/containerd.sock
I1228 06:57:53.204028 775090 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1228 06:57:53.209109 775090 start.go:574] Will wait 60s for crictl version
I1228 06:57:53.209172 775090 ssh_runner.go:195] Run: which crictl
I1228 06:57:53.213510 775090 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1228 06:57:53.244557 775090 start.go:590] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.2.1
RuntimeApiVersion: v1
I1228 06:57:53.244630 775090 ssh_runner.go:195] Run: containerd --version
I1228 06:57:53.268432 775090 ssh_runner.go:195] Run: containerd --version
I1228 06:57:53.296350 775090 out.go:179] * Preparing containerd 2.2.1 ...
I1228 06:57:53.298007 775090 ssh_runner.go:195] Run: rm -f paused
I1228 06:57:53.303973 775090 out.go:179] * Done! minikube is ready without Kubernetes!
I1228 06:57:53.305179 775090 out.go:203] ╭──────────────────────────────────────────────────────────╮
│ │
│ * Things to try without Kubernetes ... │
│ │
│ - "minikube ssh" to SSH into minikube's node. │
│ - "minikube image" to build images without docker. │
│ │
╰──────────────────────────────────────────────────────────╯
I1228 06:57:52.686816 773850 pod_ready.go:94] pod "kube-proxy-8zhkz" is "Ready"
I1228 06:57:52.686848 773850 pod_ready.go:86] duration metric: took 400.3966ms for pod "kube-proxy-8zhkz" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:52.886851 773850 pod_ready.go:83] waiting for pod "kube-scheduler-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:53.286420 773850 pod_ready.go:94] pod "kube-scheduler-pause-327044" is "Ready"
I1228 06:57:53.286450 773850 pod_ready.go:86] duration metric: took 399.565635ms for pod "kube-scheduler-pause-327044" in "kube-system" namespace to be "Ready" or be gone ...
I1228 06:57:53.286466 773850 pod_ready.go:40] duration metric: took 1.604265836s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1228 06:57:53.342018 773850 start.go:625] kubectl: 1.35.0, cluster: 1.35.0 (minor skew: 0)
I1228 06:57:53.344256 773850 out.go:179] * Done! kubectl is now configured to use "pause-327044" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                    NAMESPACE
0c035631e5151   aa5e3ebc0dfed   9 seconds ago    Running   coredns                   0         55cc563e7b7d2   coredns-7d764666f9-25xk9               kube-system
50672f9c32ffa   4921d7a6dffa9   20 seconds ago   Running   kindnet-cni               0         c3693a1a8e48d   kindnet-tzx87                          kube-system
9155fcef604b3   32652ff1bbe6b   24 seconds ago   Running   kube-proxy                0         0f2c0a3b1a444   kube-proxy-8zhkz                       kube-system
2b16ec6eaf39f   2c9a4b058bd7e   34 seconds ago   Running   kube-controller-manager   0         0eeee28d3644b   kube-controller-manager-pause-327044   kube-system
0c69c386edc79   550794e3b12ac   35 seconds ago   Running   kube-scheduler            0         b4486600e9858   kube-scheduler-pause-327044            kube-system
96f2e58cff4e8   5c6acd67e9cd1   35 seconds ago   Running   kube-apiserver            0         5c580983540c8   kube-apiserver-pause-327044            kube-system
ce2eef6d0943c   0a108f7189562   35 seconds ago   Running   etcd                      0         d8290fea6fe0d   etcd-pause-327044                      kube-system
==> containerd <==
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204181271Z" level=info msg="loading plugin" id=io.containerd.grpc.v1.transfer type=io.containerd.grpc.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204203973Z" level=info msg="loading plugin" id=io.containerd.grpc.v1.version type=io.containerd.grpc.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204268708Z" level=info msg="loading plugin" id=io.containerd.monitor.container.v1.restart type=io.containerd.monitor.container.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204319313Z" level=info msg="loading plugin" id=io.containerd.tracing.processor.v1.otlp type=io.containerd.tracing.processor.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204342828Z" level=info msg="skip loading plugin" error="skip plugin: tracing endpoint not configured" id=io.containerd.tracing.processor.v1.otlp type=io.containerd.tracing.processor.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204354788Z" level=info msg="loading plugin" id=io.containerd.internal.v1.tracing type=io.containerd.internal.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204367861Z" level=info msg="skip loading plugin" error="skip plugin: tracing endpoint not configured" id=io.containerd.internal.v1.tracing type=io.containerd.internal.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204382972Z" level=info msg="loading plugin" id=io.containerd.ttrpc.v1.otelttrpc type=io.containerd.ttrpc.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204396125Z" level=info msg="loading plugin" id=io.containerd.grpc.v1.healthcheck type=io.containerd.grpc.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204414757Z" level=info msg="loading plugin" id=io.containerd.grpc.v1.cri type=io.containerd.grpc.v1
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.204452883Z" level=info msg="Connect containerd service"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.205282874Z" level=info msg="using experimental NRI integration - disable nri plugin to prevent this"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.217033351Z" level=info msg="Start subscribing containerd event"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.217088040Z" level=info msg="Start recovering state"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.217321665Z" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.217397773Z" level=info msg=serving... address=/run/containerd/containerd.sock
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255182770Z" level=info msg="Start event monitor"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255241289Z" level=info msg="Start cni network conf syncer for default"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255254265Z" level=info msg="Start streaming server"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255263999Z" level=info msg="Registered namespace \"k8s.io\" with NRI"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255273456Z" level=info msg="runtime interface starting up..."
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255280764Z" level=info msg="starting plugins..."
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.255294866Z" level=info msg="Synchronizing NRI (plugin) with current runtime state"
Dec 28 06:57:50 pause-327044 containerd[2394]: time="2025-12-28T06:57:50.264647762Z" level=info msg="containerd successfully booted in 0.110355s"
Dec 28 06:57:50 pause-327044 systemd[1]: Started containerd.service - containerd container runtime.
==> describe nodes <==
Name: pause-327044
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=pause-327044
kubernetes.io/os=linux
minikube.k8s.io/commit=a9d18bae8c1fce4e804f90745897ed87020e8dba
minikube.k8s.io/name=pause-327044
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_28T06_57_24_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 28 Dec 2025 06:57:21 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: pause-327044
AcquireTime: <unset>
RenewTime: Sun, 28 Dec 2025 06:57:44 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Sun, 28 Dec 2025 06:57:45 +0000   Sun, 28 Dec 2025 06:57:21 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Sun, 28 Dec 2025 06:57:45 +0000   Sun, 28 Dec 2025 06:57:21 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Sun, 28 Dec 2025 06:57:45 +0000   Sun, 28 Dec 2025 06:57:21 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Sun, 28 Dec 2025 06:57:45 +0000   Sun, 28 Dec 2025 06:57:45 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: pause-327044
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 493159aea3d8b8768b108b926950835d
System UUID: ce751e5d-a580-4467-a1ff-dc48ffb99606
Boot ID: b0f6328b-901c-4d58-bf8e-80c711dcb897
Kernel Version: 6.8.0-1045-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.2.1
Kubelet Version: v1.35.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace     Name                                   CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                   ------------   ----------   ---------------   -------------   ---
kube-system   coredns-7d764666f9-25xk9               100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      26s
kube-system   etcd-pause-327044                      100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          32s
kube-system   kindnet-tzx87                          100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)      26s
kube-system   kube-apiserver-pause-327044            250m (3%)      0 (0%)       0 (0%)            0 (0%)          32s
kube-system   kube-controller-manager-pause-327044   200m (2%)      0 (0%)       0 (0%)            0 (0%)          32s
kube-system   kube-proxy-8zhkz                       0 (0%)         0 (0%)       0 (0%)            0 (0%)          26s
kube-system   kube-scheduler-pause-327044            100m (1%)      0 (0%)       0 (0%)            0 (0%)          32s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 850m (10%)   100m (1%)
memory              220Mi (0%)   220Mi (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
Events:
Type     Reason          Age   From              Message
----     ------          ---   ----              -------
Normal   RegisteredNode  27s   node-controller   Node pause-327044 event: Registered Node pause-327044 in Controller
==> dmesg <==
==> kernel <==
06:57:55 up 3:40, 0 user, load average: 3.84, 3.05, 14.94
Linux pause-327044 6.8.0-1045-gcp #48~22.04.1-Ubuntu SMP Tue Nov 25 13:07:56 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kubelet <==
Dec 28 06:57:29 pause-327044 kubelet[1421]: I1228 06:57:29.745311 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b3862b54-c993-4353-bc0d-57485386eff2-lib-modules\") pod \"kindnet-tzx87\" (UID: \"b3862b54-c993-4353-bc0d-57485386eff2\") " pod="kube-system/kindnet-tzx87"
Dec 28 06:57:29 pause-327044 kubelet[1421]: I1228 06:57:29.745354 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7zl2\" (UniqueName: \"kubernetes.io/projected/b3862b54-c993-4353-bc0d-57485386eff2-kube-api-access-z7zl2\") pod \"kindnet-tzx87\" (UID: \"b3862b54-c993-4353-bc0d-57485386eff2\") " pod="kube-system/kindnet-tzx87"
Dec 28 06:57:29 pause-327044 kubelet[1421]: I1228 06:57:29.745376 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/fda10b86-5089-4bf2-a2c1-b9f38a0784c4-kube-proxy\") pod \"kube-proxy-8zhkz\" (UID: \"fda10b86-5089-4bf2-a2c1-b9f38a0784c4\") " pod="kube-system/kube-proxy-8zhkz"
Dec 28 06:57:29 pause-327044 kubelet[1421]: I1228 06:57:29.745413 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fda10b86-5089-4bf2-a2c1-b9f38a0784c4-lib-modules\") pod \"kube-proxy-8zhkz\" (UID: \"fda10b86-5089-4bf2-a2c1-b9f38a0784c4\") " pod="kube-system/kube-proxy-8zhkz"
Dec 28 06:57:29 pause-327044 kubelet[1421]: I1228 06:57:29.745432 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxjbk\" (UniqueName: \"kubernetes.io/projected/fda10b86-5089-4bf2-a2c1-b9f38a0784c4-kube-api-access-hxjbk\") pod \"kube-proxy-8zhkz\" (UID: \"fda10b86-5089-4bf2-a2c1-b9f38a0784c4\") " pod="kube-system/kube-proxy-8zhkz"
Dec 28 06:57:29 pause-327044 kubelet[1421]: E1228 06:57:29.856610 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-pause-327044" containerName="kube-controller-manager"
Dec 28 06:57:30 pause-327044 kubelet[1421]: I1228 06:57:30.858877 1421 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-proxy-8zhkz" podStartSLOduration=1.858859847 podStartE2EDuration="1.858859847s" podCreationTimestamp="2025-12-28 06:57:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-28 06:57:30.858847647 +0000 UTC m=+7.125532242" watchObservedRunningTime="2025-12-28 06:57:30.858859847 +0000 UTC m=+7.125544424"
Dec 28 06:57:32 pause-327044 kubelet[1421]: E1228 06:57:32.818519 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-pause-327044" containerName="etcd"
Dec 28 06:57:33 pause-327044 kubelet[1421]: E1228 06:57:33.942288 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-pause-327044" containerName="kube-scheduler"
Dec 28 06:57:34 pause-327044 kubelet[1421]: I1228 06:57:34.867728 1421 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kindnet-tzx87" podStartSLOduration=1.997574582 podStartE2EDuration="5.867708541s" podCreationTimestamp="2025-12-28 06:57:29 +0000 UTC" firstStartedPulling="2025-12-28 06:57:30.385670034 +0000 UTC m=+6.652354591" lastFinishedPulling="2025-12-28 06:57:34.255803983 +0000 UTC m=+10.522488550" observedRunningTime="2025-12-28 06:57:34.867325361 +0000 UTC m=+11.134009955" watchObservedRunningTime="2025-12-28 06:57:34.867708541 +0000 UTC m=+11.134393116"
Dec 28 06:57:35 pause-327044 kubelet[1421]: E1228 06:57:35.386534 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-pause-327044" containerName="kube-apiserver"
Dec 28 06:57:39 pause-327044 kubelet[1421]: E1228 06:57:39.862355 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-pause-327044" containerName="kube-controller-manager"
Dec 28 06:57:42 pause-327044 kubelet[1421]: E1228 06:57:42.820104 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-pause-327044" containerName="etcd"
Dec 28 06:57:43 pause-327044 kubelet[1421]: E1228 06:57:43.946481 1421 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-pause-327044" containerName="kube-scheduler"
Dec 28 06:57:45 pause-327044 kubelet[1421]: I1228 06:57:45.126097 1421 kubelet_node_status.go:427] "Fast updating node status as it just became ready"
Dec 28 06:57:45 pause-327044 kubelet[1421]: I1228 06:57:45.256444 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4qpd\" (UniqueName: \"kubernetes.io/projected/027bff7d-bef6-4d94-9cf5-0feb5f7d4c99-kube-api-access-p4qpd\") pod \"coredns-7d764666f9-25xk9\" (UID: \"027bff7d-bef6-4d94-9cf5-0feb5f7d4c99\") " pod="kube-system/coredns-7d764666f9-25xk9"
Dec 28 06:57:45 pause-327044 kubelet[1421]: I1228 06:57:45.256514 1421 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/027bff7d-bef6-4d94-9cf5-0feb5f7d4c99-config-volume\") pod \"coredns-7d764666f9-25xk9\" (UID: \"027bff7d-bef6-4d94-9cf5-0feb5f7d4c99\") " pod="kube-system/coredns-7d764666f9-25xk9"
Dec 28 06:57:45 pause-327044 kubelet[1421]: E1228 06:57:45.882278 1421 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-25xk9" containerName="coredns"
Dec 28 06:57:45 pause-327044 kubelet[1421]: I1228 06:57:45.896141 1421 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-25xk9" podStartSLOduration=16.896119898 podStartE2EDuration="16.896119898s" podCreationTimestamp="2025-12-28 06:57:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-28 06:57:45.895609159 +0000 UTC m=+22.162293734" watchObservedRunningTime="2025-12-28 06:57:45.896119898 +0000 UTC m=+22.162804473"
Dec 28 06:57:46 pause-327044 kubelet[1421]: E1228 06:57:46.884445 1421 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-25xk9" containerName="coredns"
Dec 28 06:57:47 pause-327044 kubelet[1421]: E1228 06:57:47.886869 1421 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-25xk9" containerName="coredns"
Dec 28 06:57:53 pause-327044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent...
Dec 28 06:57:53 pause-327044 systemd[1]: kubelet.service: Deactivated successfully.
Dec 28 06:57:53 pause-327044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 28 06:57:53 pause-327044 systemd[1]: kubelet.service: Consumed 1.431s CPU time.
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-327044 -n pause-327044
helpers_test.go:263: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-327044 -n pause-327044: exit status 2 (334.397526ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:263: status error: exit status 2 (may be ok)
helpers_test.go:270: (dbg) Run: kubectl --context pause-327044 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:294: <<< TestPause/serial/VerifyStatus FAILED: end of post-mortem logs <<<
helpers_test.go:295: ---------------------/post-mortem---------------------------------
--- FAIL: TestPause/serial/VerifyStatus (1.84s)