=== RUN TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-arm64 start -p scheduled-stop-599041 --memory=3072 --driver=docker --container-runtime=containerd
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-599041 --memory=3072 --driver=docker --container-runtime=containerd: (36.446855721s)
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-599041 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-599041 -n scheduled-stop-599041
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-599041 --schedule 15s
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:98: process 152898 running but should have been killed on reschedule of stop
panic.go:636: *** TestScheduledStopUnix FAILED at 2025-11-01 09:05:35.750794877 +0000 UTC m=+2170.839262175
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestScheduledStopUnix]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect scheduled-stop-599041
helpers_test.go:243: (dbg) docker inspect scheduled-stop-599041:
-- stdout --
[
{
"Id": "769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1",
"Created": "2025-11-01T09:05:04.673121421Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 150909,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-01T09:05:04.749577116Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:1b8004df0b408966a254b2ecd4551aa85aaac4627e7e9cb1cefc14dfe51ec273",
"ResolvConfPath": "/var/lib/docker/containers/769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1/hostname",
"HostsPath": "/var/lib/docker/containers/769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1/hosts",
"LogPath": "/var/lib/docker/containers/769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1/769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1-json.log",
"Name": "/scheduled-stop-599041",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"scheduled-stop-599041:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "scheduled-stop-599041",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "769d2cdeb9d73a580a908770ec6171a9277e0dbcfe105b256454403f549ab5c1",
"LowerDir": "/var/lib/docker/overlay2/b5b3d19e1e23a1e4229284b8e48621fffb114388b12f8f5843022a577e60a9ad-init/diff:/var/lib/docker/overlay2/2ae9db781f71f6b40134c14ce962b520e95fb32a2be583edc8b9ca9696e3b6fd/diff",
"MergedDir": "/var/lib/docker/overlay2/b5b3d19e1e23a1e4229284b8e48621fffb114388b12f8f5843022a577e60a9ad/merged",
"UpperDir": "/var/lib/docker/overlay2/b5b3d19e1e23a1e4229284b8e48621fffb114388b12f8f5843022a577e60a9ad/diff",
"WorkDir": "/var/lib/docker/overlay2/b5b3d19e1e23a1e4229284b8e48621fffb114388b12f8f5843022a577e60a9ad/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "scheduled-stop-599041",
"Source": "/var/lib/docker/volumes/scheduled-stop-599041/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "scheduled-stop-599041",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "scheduled-stop-599041",
"name.minikube.sigs.k8s.io": "scheduled-stop-599041",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "db98d887e68ca8a792f88f370660ba5a7b45050403f48d0f8fe7ca97f3075c2d",
"SandboxKey": "/var/run/docker/netns/db98d887e68c",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32969"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32970"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32973"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32971"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32972"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"scheduled-stop-599041": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "0e:5d:09:d8:c4:21",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "1423ff4e7bda50d7f2b4443b86acd1e88ce4d225f3212189f44a33bd0c1d1b50",
"EndpointID": "7b2293b7b7677fc70db958d9b7bee067a6c8a0cd606399196af66c25f4fe3087",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"scheduled-stop-599041",
"769d2cdeb9d7"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-599041 -n scheduled-stop-599041
helpers_test.go:252: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestScheduledStopUnix]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p scheduled-stop-599041 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p scheduled-stop-599041 logs -n 25: (1.072269211s)
helpers_test.go:260: TestScheduledStopUnix logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ stop │ -p multinode-819483 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 08:59 UTC │ 01 Nov 25 09:00 UTC │
│ start │ -p multinode-819483 --wait=true -v=5 --alsologtostderr │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:00 UTC │ 01 Nov 25 09:00 UTC │
│ node │ list -p multinode-819483 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:00 UTC │ │
│ node │ multinode-819483 node delete m03 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:00 UTC │ 01 Nov 25 09:00 UTC │
│ stop │ multinode-819483 stop │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:00 UTC │ 01 Nov 25 09:01 UTC │
│ start │ -p multinode-819483 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=containerd │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:01 UTC │ 01 Nov 25 09:02 UTC │
│ node │ list -p multinode-819483 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ │
│ start │ -p multinode-819483-m02 --driver=docker --container-runtime=containerd │ multinode-819483-m02 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ │
│ start │ -p multinode-819483-m03 --driver=docker --container-runtime=containerd │ multinode-819483-m03 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ 01 Nov 25 09:02 UTC │
│ node │ add -p multinode-819483 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ │
│ delete │ -p multinode-819483-m03 │ multinode-819483-m03 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ 01 Nov 25 09:02 UTC │
│ delete │ -p multinode-819483 │ multinode-819483 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ 01 Nov 25 09:02 UTC │
│ start │ -p test-preload-208683 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.32.0 │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:02 UTC │ 01 Nov 25 09:03 UTC │
│ image │ test-preload-208683 image pull gcr.io/k8s-minikube/busybox │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:03 UTC │ 01 Nov 25 09:03 UTC │
│ stop │ -p test-preload-208683 │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:03 UTC │ 01 Nov 25 09:04 UTC │
│ start │ -p test-preload-208683 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=containerd │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:04 UTC │ 01 Nov 25 09:04 UTC │
│ image │ test-preload-208683 image list │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:04 UTC │ 01 Nov 25 09:04 UTC │
│ delete │ -p test-preload-208683 │ test-preload-208683 │ jenkins │ v1.37.0 │ 01 Nov 25 09:04 UTC │ 01 Nov 25 09:04 UTC │
│ start │ -p scheduled-stop-599041 --memory=3072 --driver=docker --container-runtime=containerd │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:04 UTC │ 01 Nov 25 09:05 UTC │
│ stop │ -p scheduled-stop-599041 --schedule 5m │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
│ stop │ -p scheduled-stop-599041 --schedule 5m │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
│ stop │ -p scheduled-stop-599041 --schedule 5m │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
│ stop │ -p scheduled-stop-599041 --schedule 15s │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
│ stop │ -p scheduled-stop-599041 --schedule 15s │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
│ stop │ -p scheduled-stop-599041 --schedule 15s │ scheduled-stop-599041 │ jenkins │ v1.37.0 │ 01 Nov 25 09:05 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/01 09:04:58
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1101 09:04:58.821876 150525 out.go:360] Setting OutFile to fd 1 ...
I1101 09:04:58.821977 150525 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1101 09:04:58.821981 150525 out.go:374] Setting ErrFile to fd 2...
I1101 09:04:58.821984 150525 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1101 09:04:58.822245 150525 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21835-2307/.minikube/bin
I1101 09:04:58.822661 150525 out.go:368] Setting JSON to false
I1101 09:04:58.823549 150525 start.go:133] hostinfo: {"hostname":"ip-172-31-31-251","uptime":2848,"bootTime":1761985051,"procs":153,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I1101 09:04:58.823605 150525 start.go:143] virtualization:
I1101 09:04:58.827493 150525 out.go:179] * [scheduled-stop-599041] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1101 09:04:58.832221 150525 out.go:179] - MINIKUBE_LOCATION=21835
I1101 09:04:58.832281 150525 notify.go:221] Checking for updates...
I1101 09:04:58.838842 150525 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1101 09:04:58.842165 150525 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21835-2307/kubeconfig
I1101 09:04:58.845451 150525 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21835-2307/.minikube
I1101 09:04:58.848547 150525 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1101 09:04:58.851613 150525 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1101 09:04:58.854965 150525 driver.go:422] Setting default libvirt URI to qemu:///system
I1101 09:04:58.883919 150525 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1101 09:04:58.884025 150525 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1101 09:04:58.940944 150525 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:42 SystemTime:2025-11-01 09:04:58.931880007 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1101 09:04:58.941045 150525 docker.go:319] overlay module found
I1101 09:04:58.946326 150525 out.go:179] * Using the docker driver based on user configuration
I1101 09:04:58.949394 150525 start.go:309] selected driver: docker
I1101 09:04:58.949404 150525 start.go:930] validating driver "docker" against <nil>
I1101 09:04:58.949415 150525 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1101 09:04:58.950262 150525 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1101 09:04:59.008846 150525 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:42 SystemTime:2025-11-01 09:04:58.99971848 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aa
rch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pa
th:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1101 09:04:59.009000 150525 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1101 09:04:59.009213 150525 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I1101 09:04:59.012277 150525 out.go:179] * Using Docker driver with root privileges
I1101 09:04:59.015278 150525 cni.go:84] Creating CNI manager for ""
I1101 09:04:59.015342 150525 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1101 09:04:59.015357 150525 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1101 09:04:59.015440 150525 start.go:353] cluster config:
{Name:scheduled-stop-599041 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-599041 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1101 09:04:59.020460 150525 out.go:179] * Starting "scheduled-stop-599041" primary control-plane node in "scheduled-stop-599041" cluster
I1101 09:04:59.023671 150525 cache.go:124] Beginning downloading kic base image for docker with containerd
I1101 09:04:59.026603 150525 out.go:179] * Pulling base image v0.0.48-1760939008-21773 ...
I1101 09:04:59.029715 150525 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1101 09:04:59.029735 150525 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon
I1101 09:04:59.029767 150525 preload.go:198] Found local preload: /home/jenkins/minikube-integration/21835-2307/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-arm64.tar.lz4
I1101 09:04:59.029775 150525 cache.go:59] Caching tarball of preloaded images
I1101 09:04:59.029873 150525 preload.go:233] Found /home/jenkins/minikube-integration/21835-2307/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1101 09:04:59.029881 150525 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1101 09:04:59.030220 150525 profile.go:143] Saving config to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/config.json ...
I1101 09:04:59.030238 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/config.json: {Name:mka44dd4be43300ac877ef95aed7e04a7d6c4d5a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:04:59.048416 150525 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon, skipping pull
I1101 09:04:59.048428 150525 cache.go:148] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 exists in daemon, skipping load
I1101 09:04:59.048446 150525 cache.go:233] Successfully downloaded all kic artifacts
I1101 09:04:59.048478 150525 start.go:360] acquireMachinesLock for scheduled-stop-599041: {Name:mk673be61ba470c7862ae4fbcae3e56f8e83cebd Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1101 09:04:59.048594 150525 start.go:364] duration metric: took 102.262µs to acquireMachinesLock for "scheduled-stop-599041"
I1101 09:04:59.048619 150525 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-599041 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-599041 Namespace:default APIServerHAVIP: A
PIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: Static
IP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1101 09:04:59.048683 150525 start.go:125] createHost starting for "" (driver="docker")
I1101 09:04:59.053779 150525 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1101 09:04:59.054000 150525 start.go:159] libmachine.API.Create for "scheduled-stop-599041" (driver="docker")
I1101 09:04:59.054027 150525 client.go:173] LocalClient.Create starting
I1101 09:04:59.054109 150525 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem
I1101 09:04:59.054144 150525 main.go:143] libmachine: Decoding PEM data...
I1101 09:04:59.054162 150525 main.go:143] libmachine: Parsing certificate...
I1101 09:04:59.054210 150525 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21835-2307/.minikube/certs/cert.pem
I1101 09:04:59.054227 150525 main.go:143] libmachine: Decoding PEM data...
I1101 09:04:59.054248 150525 main.go:143] libmachine: Parsing certificate...
I1101 09:04:59.054593 150525 cli_runner.go:164] Run: docker network inspect scheduled-stop-599041 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1101 09:04:59.069998 150525 cli_runner.go:211] docker network inspect scheduled-stop-599041 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1101 09:04:59.070076 150525 network_create.go:284] running [docker network inspect scheduled-stop-599041] to gather additional debugging logs...
I1101 09:04:59.070092 150525 cli_runner.go:164] Run: docker network inspect scheduled-stop-599041
W1101 09:04:59.086651 150525 cli_runner.go:211] docker network inspect scheduled-stop-599041 returned with exit code 1
I1101 09:04:59.086689 150525 network_create.go:287] error running [docker network inspect scheduled-stop-599041]: docker network inspect scheduled-stop-599041: exit status 1
stdout:
[]
stderr:
Error response from daemon: network scheduled-stop-599041 not found
I1101 09:04:59.086699 150525 network_create.go:289] output of [docker network inspect scheduled-stop-599041]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network scheduled-stop-599041 not found
** /stderr **
I1101 09:04:59.086812 150525 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1101 09:04:59.103027 150525 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-519f9941df81 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:da:56:5d:1d:ec:84} reservation:<nil>}
I1101 09:04:59.103244 150525 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-4e7f056af18f IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:1e:1b:39:7e:aa:dd} reservation:<nil>}
I1101 09:04:59.103507 150525 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-cbe92f0bc81a IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:8d:b8:d6:a8:8c} reservation:<nil>}
I1101 09:04:59.103826 150525 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a0a680}
I1101 09:04:59.103841 150525 network_create.go:124] attempt to create docker network scheduled-stop-599041 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1101 09:04:59.103894 150525 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-599041 scheduled-stop-599041
I1101 09:04:59.167449 150525 network_create.go:108] docker network scheduled-stop-599041 192.168.76.0/24 created
I1101 09:04:59.167470 150525 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-599041" container
I1101 09:04:59.167542 150525 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1101 09:04:59.183697 150525 cli_runner.go:164] Run: docker volume create scheduled-stop-599041 --label name.minikube.sigs.k8s.io=scheduled-stop-599041 --label created_by.minikube.sigs.k8s.io=true
I1101 09:04:59.202370 150525 oci.go:103] Successfully created a docker volume scheduled-stop-599041
I1101 09:04:59.202456 150525 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-599041-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-599041 --entrypoint /usr/bin/test -v scheduled-stop-599041:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -d /var/lib
I1101 09:04:59.763149 150525 oci.go:107] Successfully prepared a docker volume scheduled-stop-599041
I1101 09:04:59.763197 150525 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1101 09:04:59.763216 150525 kic.go:194] Starting extracting preloaded images to volume ...
I1101 09:04:59.763279 150525 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21835-2307/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-599041:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir
I1101 09:05:04.601661 150525 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21835-2307/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-599041:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir: (4.838333664s)
I1101 09:05:04.601681 150525 kic.go:203] duration metric: took 4.838462191s to extract preloaded images to volume ...
W1101 09:05:04.601843 150525 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1101 09:05:04.601944 150525 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1101 09:05:04.657508 150525 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-599041 --name scheduled-stop-599041 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-599041 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-599041 --network scheduled-stop-599041 --ip 192.168.76.2 --volume scheduled-stop-599041:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8
I1101 09:05:04.965832 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Running}}
I1101 09:05:04.987794 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:05.014931 150525 cli_runner.go:164] Run: docker exec scheduled-stop-599041 stat /var/lib/dpkg/alternatives/iptables
I1101 09:05:05.063958 150525 oci.go:144] the created container "scheduled-stop-599041" has a running status.
I1101 09:05:05.063977 150525 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa...
I1101 09:05:05.673173 150525 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1101 09:05:05.692931 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:05.709936 150525 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1101 09:05:05.709948 150525 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-599041 chown docker:docker /home/docker/.ssh/authorized_keys]
I1101 09:05:05.753593 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:05.770201 150525 machine.go:94] provisionDockerMachine start ...
I1101 09:05:05.770290 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:05.786579 150525 main.go:143] libmachine: Using SSH client type: native
I1101 09:05:05.786899 150525 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 32969 <nil> <nil>}
I1101 09:05:05.786905 150525 main.go:143] libmachine: About to run SSH command:
hostname
I1101 09:05:05.787559 150525 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1101 09:05:08.941452 150525 main.go:143] libmachine: SSH cmd err, output: <nil>: scheduled-stop-599041
I1101 09:05:08.941467 150525 ubuntu.go:182] provisioning hostname "scheduled-stop-599041"
I1101 09:05:08.941538 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:08.959396 150525 main.go:143] libmachine: Using SSH client type: native
I1101 09:05:08.959703 150525 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 32969 <nil> <nil>}
I1101 09:05:08.959713 150525 main.go:143] libmachine: About to run SSH command:
sudo hostname scheduled-stop-599041 && echo "scheduled-stop-599041" | sudo tee /etc/hostname
I1101 09:05:09.120026 150525 main.go:143] libmachine: SSH cmd err, output: <nil>: scheduled-stop-599041
I1101 09:05:09.120093 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:09.153541 150525 main.go:143] libmachine: Using SSH client type: native
I1101 09:05:09.153894 150525 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 32969 <nil> <nil>}
I1101 09:05:09.153910 150525 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sscheduled-stop-599041' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-599041/g' /etc/hosts;
else
echo '127.0.1.1 scheduled-stop-599041' | sudo tee -a /etc/hosts;
fi
fi
I1101 09:05:09.302071 150525 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1101 09:05:09.302102 150525 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21835-2307/.minikube CaCertPath:/home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21835-2307/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21835-2307/.minikube}
I1101 09:05:09.302123 150525 ubuntu.go:190] setting up certificates
I1101 09:05:09.302131 150525 provision.go:84] configureAuth start
I1101 09:05:09.302190 150525 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-599041
I1101 09:05:09.320516 150525 provision.go:143] copyHostCerts
I1101 09:05:09.320570 150525 exec_runner.go:144] found /home/jenkins/minikube-integration/21835-2307/.minikube/ca.pem, removing ...
I1101 09:05:09.320577 150525 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21835-2307/.minikube/ca.pem
I1101 09:05:09.320650 150525 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21835-2307/.minikube/ca.pem (1082 bytes)
I1101 09:05:09.320745 150525 exec_runner.go:144] found /home/jenkins/minikube-integration/21835-2307/.minikube/cert.pem, removing ...
I1101 09:05:09.320748 150525 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21835-2307/.minikube/cert.pem
I1101 09:05:09.320774 150525 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21835-2307/.minikube/cert.pem (1123 bytes)
I1101 09:05:09.320824 150525 exec_runner.go:144] found /home/jenkins/minikube-integration/21835-2307/.minikube/key.pem, removing ...
I1101 09:05:09.320828 150525 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21835-2307/.minikube/key.pem
I1101 09:05:09.320849 150525 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21835-2307/.minikube/key.pem (1679 bytes)
I1101 09:05:09.320892 150525 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21835-2307/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca-key.pem org=jenkins.scheduled-stop-599041 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-599041]
I1101 09:05:10.059114 150525 provision.go:177] copyRemoteCerts
I1101 09:05:10.059171 150525 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1101 09:05:10.059211 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:10.076964 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:10.185742 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1101 09:05:10.203020 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/machines/server.pem --> /etc/docker/server.pem (1229 bytes)
I1101 09:05:10.220881 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1101 09:05:10.238505 150525 provision.go:87] duration metric: took 936.350408ms to configureAuth
I1101 09:05:10.238522 150525 ubuntu.go:206] setting minikube options for container-runtime
I1101 09:05:10.238704 150525 config.go:182] Loaded profile config "scheduled-stop-599041": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1101 09:05:10.238710 150525 machine.go:97] duration metric: took 4.468501073s to provisionDockerMachine
I1101 09:05:10.238716 150525 client.go:176] duration metric: took 11.184684455s to LocalClient.Create
I1101 09:05:10.238735 150525 start.go:167] duration metric: took 11.18473495s to libmachine.API.Create "scheduled-stop-599041"
I1101 09:05:10.238741 150525 start.go:293] postStartSetup for "scheduled-stop-599041" (driver="docker")
I1101 09:05:10.238749 150525 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1101 09:05:10.238802 150525 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1101 09:05:10.238850 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:10.255899 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:10.361672 150525 ssh_runner.go:195] Run: cat /etc/os-release
I1101 09:05:10.365054 150525 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1101 09:05:10.365075 150525 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1101 09:05:10.365084 150525 filesync.go:126] Scanning /home/jenkins/minikube-integration/21835-2307/.minikube/addons for local assets ...
I1101 09:05:10.365143 150525 filesync.go:126] Scanning /home/jenkins/minikube-integration/21835-2307/.minikube/files for local assets ...
I1101 09:05:10.365228 150525 filesync.go:149] local asset: /home/jenkins/minikube-integration/21835-2307/.minikube/files/etc/ssl/certs/41072.pem -> 41072.pem in /etc/ssl/certs
I1101 09:05:10.365326 150525 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1101 09:05:10.372476 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/files/etc/ssl/certs/41072.pem --> /etc/ssl/certs/41072.pem (1708 bytes)
I1101 09:05:10.389769 150525 start.go:296] duration metric: took 151.009171ms for postStartSetup
I1101 09:05:10.390116 150525 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-599041
I1101 09:05:10.406778 150525 profile.go:143] Saving config to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/config.json ...
I1101 09:05:10.407060 150525 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1101 09:05:10.407104 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:10.423778 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:10.522749 150525 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1101 09:05:10.527326 150525 start.go:128] duration metric: took 11.478629645s to createHost
I1101 09:05:10.527340 150525 start.go:83] releasing machines lock for "scheduled-stop-599041", held for 11.478739194s
I1101 09:05:10.527417 150525 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-599041
I1101 09:05:10.544789 150525 ssh_runner.go:195] Run: cat /version.json
I1101 09:05:10.544867 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:10.545109 150525 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1101 09:05:10.545160 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:10.561623 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:10.567212 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:10.661800 150525 ssh_runner.go:195] Run: systemctl --version
I1101 09:05:10.760453 150525 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1101 09:05:10.764863 150525 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1101 09:05:10.764920 150525 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1101 09:05:10.792852 150525 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1101 09:05:10.792865 150525 start.go:496] detecting cgroup driver to use...
I1101 09:05:10.792897 150525 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1101 09:05:10.792950 150525 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1101 09:05:10.808265 150525 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1101 09:05:10.821088 150525 docker.go:218] disabling cri-docker service (if available) ...
I1101 09:05:10.821139 150525 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1101 09:05:10.837150 150525 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1101 09:05:10.855925 150525 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1101 09:05:10.974984 150525 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1101 09:05:11.098725 150525 docker.go:234] disabling docker service ...
I1101 09:05:11.098812 150525 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1101 09:05:11.122217 150525 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1101 09:05:11.136771 150525 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1101 09:05:11.253793 150525 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1101 09:05:11.374168 150525 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1101 09:05:11.387382 150525 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1101 09:05:11.401008 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1101 09:05:11.410112 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1101 09:05:11.418692 150525 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1101 09:05:11.418752 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1101 09:05:11.427397 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1101 09:05:11.436037 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1101 09:05:11.444764 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1101 09:05:11.453255 150525 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1101 09:05:11.461381 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1101 09:05:11.470194 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1101 09:05:11.478863 150525 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1101 09:05:11.487828 150525 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1101 09:05:11.495475 150525 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1101 09:05:11.502890 150525 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 09:05:11.612108 150525 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1101 09:05:11.756972 150525 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1101 09:05:11.757037 150525 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1101 09:05:11.761078 150525 start.go:564] Will wait 60s for crictl version
I1101 09:05:11.761141 150525 ssh_runner.go:195] Run: which crictl
I1101 09:05:11.764566 150525 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1101 09:05:11.792103 150525 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v1.7.28
RuntimeApiVersion: v1
I1101 09:05:11.792174 150525 ssh_runner.go:195] Run: containerd --version
I1101 09:05:11.815075 150525 ssh_runner.go:195] Run: containerd --version
I1101 09:05:11.842983 150525 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 1.7.28 ...
I1101 09:05:11.846025 150525 cli_runner.go:164] Run: docker network inspect scheduled-stop-599041 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1101 09:05:11.862301 150525 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1101 09:05:11.866104 150525 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1101 09:05:11.876404 150525 kubeadm.go:884] updating cluster {Name:scheduled-stop-599041 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-599041 Namespace:default APIServerHAVIP: APIServerName:miniku
beCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1101 09:05:11.876513 150525 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1101 09:05:11.876583 150525 ssh_runner.go:195] Run: sudo crictl images --output json
I1101 09:05:11.901121 150525 containerd.go:627] all images are preloaded for containerd runtime.
I1101 09:05:11.901133 150525 containerd.go:534] Images already preloaded, skipping extraction
I1101 09:05:11.901193 150525 ssh_runner.go:195] Run: sudo crictl images --output json
I1101 09:05:11.925562 150525 containerd.go:627] all images are preloaded for containerd runtime.
I1101 09:05:11.925575 150525 cache_images.go:86] Images are preloaded, skipping loading
I1101 09:05:11.925582 150525 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.34.1 containerd true true} ...
I1101 09:05:11.925669 150525 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-599041 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-599041 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1101 09:05:11.925752 150525 ssh_runner.go:195] Run: sudo crictl info
I1101 09:05:11.952851 150525 cni.go:84] Creating CNI manager for ""
I1101 09:05:11.952862 150525 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1101 09:05:11.952883 150525 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1101 09:05:11.952904 150525 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-599041 NodeName:scheduled-stop-599041 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt St
aticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1101 09:05:11.953013 150525 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "scheduled-stop-599041"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1101 09:05:11.953094 150525 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1101 09:05:11.960892 150525 binaries.go:44] Found k8s binaries, skipping transfer
I1101 09:05:11.960953 150525 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1101 09:05:11.968500 150525 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (325 bytes)
I1101 09:05:11.981281 150525 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1101 09:05:11.995762 150525 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2234 bytes)
I1101 09:05:12.011084 150525 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1101 09:05:12.015186 150525 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1101 09:05:12.025285 150525 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 09:05:12.135453 150525 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1101 09:05:12.152271 150525 certs.go:69] Setting up /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041 for IP: 192.168.76.2
I1101 09:05:12.152281 150525 certs.go:195] generating shared ca certs ...
I1101 09:05:12.152295 150525 certs.go:227] acquiring lock for ca certs: {Name:mk6850b6a29536d9828e4f0f9b1ede9faf3180b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:12.152451 150525 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21835-2307/.minikube/ca.key
I1101 09:05:12.152490 150525 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21835-2307/.minikube/proxy-client-ca.key
I1101 09:05:12.152496 150525 certs.go:257] generating profile certs ...
I1101 09:05:12.152550 150525 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.key
I1101 09:05:12.152559 150525 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.crt with IP's: []
I1101 09:05:13.794029 150525 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.crt ...
I1101 09:05:13.794045 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.crt: {Name:mkb7f90bc6723b8dbd945928ee29c0ee103c370c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:13.794254 150525 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.key ...
I1101 09:05:13.794262 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/client.key: {Name:mk9cfe17088238a1950a2270fb89f71aae682ea1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:13.794363 150525 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key.cdac70e0
I1101 09:05:13.794379 150525 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt.cdac70e0 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1101 09:05:15.341317 150525 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt.cdac70e0 ...
I1101 09:05:15.341332 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt.cdac70e0: {Name:mk07eb870f965531247aa2d264dd9102c9defe01 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:15.341524 150525 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key.cdac70e0 ...
I1101 09:05:15.341532 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key.cdac70e0: {Name:mk0af640416ce30af243d4dde4d977deb290ebbd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:15.341617 150525 certs.go:382] copying /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt.cdac70e0 -> /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt
I1101 09:05:15.341747 150525 certs.go:386] copying /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key.cdac70e0 -> /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key
I1101 09:05:15.341822 150525 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.key
I1101 09:05:15.341835 150525 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.crt with IP's: []
I1101 09:05:15.552716 150525 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.crt ...
I1101 09:05:15.552730 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.crt: {Name:mk3abfadda991c8280d486870a29a34664acd426 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:15.552921 150525 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.key ...
I1101 09:05:15.552928 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.key: {Name:mk4305c4db6257a9d091465139b7460e126806a1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:15.553133 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/4107.pem (1338 bytes)
W1101 09:05:15.553174 150525 certs.go:480] ignoring /home/jenkins/minikube-integration/21835-2307/.minikube/certs/4107_empty.pem, impossibly tiny 0 bytes
I1101 09:05:15.553181 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca-key.pem (1675 bytes)
I1101 09:05:15.553205 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/ca.pem (1082 bytes)
I1101 09:05:15.553225 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/cert.pem (1123 bytes)
I1101 09:05:15.553254 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/certs/key.pem (1679 bytes)
I1101 09:05:15.553297 150525 certs.go:484] found cert: /home/jenkins/minikube-integration/21835-2307/.minikube/files/etc/ssl/certs/41072.pem (1708 bytes)
I1101 09:05:15.553925 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1101 09:05:15.573727 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1101 09:05:15.592568 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1101 09:05:15.612127 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1101 09:05:15.630426 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1101 09:05:15.649237 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1101 09:05:15.666581 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1101 09:05:15.684437 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/profiles/scheduled-stop-599041/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1101 09:05:15.701533 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/files/etc/ssl/certs/41072.pem --> /usr/share/ca-certificates/41072.pem (1708 bytes)
I1101 09:05:15.719352 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1101 09:05:15.737069 150525 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21835-2307/.minikube/certs/4107.pem --> /usr/share/ca-certificates/4107.pem (1338 bytes)
I1101 09:05:15.755141 150525 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1101 09:05:15.768463 150525 ssh_runner.go:195] Run: openssl version
I1101 09:05:15.774825 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41072.pem && ln -fs /usr/share/ca-certificates/41072.pem /etc/ssl/certs/41072.pem"
I1101 09:05:15.783429 150525 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41072.pem
I1101 09:05:15.787589 150525 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 1 08:36 /usr/share/ca-certificates/41072.pem
I1101 09:05:15.787657 150525 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41072.pem
I1101 09:05:15.829140 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41072.pem /etc/ssl/certs/3ec20f2e.0"
I1101 09:05:15.838115 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1101 09:05:15.846884 150525 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1101 09:05:15.850847 150525 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 1 08:30 /usr/share/ca-certificates/minikubeCA.pem
I1101 09:05:15.850902 150525 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1101 09:05:15.892430 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1101 09:05:15.902232 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4107.pem && ln -fs /usr/share/ca-certificates/4107.pem /etc/ssl/certs/4107.pem"
I1101 09:05:15.911060 150525 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4107.pem
I1101 09:05:15.918251 150525 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 1 08:36 /usr/share/ca-certificates/4107.pem
I1101 09:05:15.918303 150525 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4107.pem
I1101 09:05:15.964112 150525 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4107.pem /etc/ssl/certs/51391683.0"
I1101 09:05:15.972422 150525 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1101 09:05:15.975994 150525 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1101 09:05:15.976037 150525 kubeadm.go:401] StartCluster: {Name:scheduled-stop-599041 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-599041 Namespace:default APIServerHAVIP: APIServerName:minikubeC
A APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAut
hSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1101 09:05:15.976113 150525 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1101 09:05:15.976190 150525 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1101 09:05:16.004490 150525 cri.go:89] found id: ""
I1101 09:05:16.004553 150525 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1101 09:05:16.013433 150525 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1101 09:05:16.021898 150525 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1101 09:05:16.021964 150525 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1101 09:05:16.030240 150525 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1101 09:05:16.030248 150525 kubeadm.go:158] found existing configuration files:
I1101 09:05:16.030298 150525 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1101 09:05:16.038657 150525 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1101 09:05:16.038724 150525 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1101 09:05:16.046547 150525 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1101 09:05:16.054553 150525 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1101 09:05:16.054614 150525 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1101 09:05:16.062818 150525 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1101 09:05:16.071055 150525 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1101 09:05:16.071114 150525 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1101 09:05:16.078600 150525 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1101 09:05:16.086599 150525 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1101 09:05:16.086667 150525 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1101 09:05:16.094721 150525 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1101 09:05:16.135824 150525 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1101 09:05:16.135941 150525 kubeadm.go:319] [preflight] Running pre-flight checks
I1101 09:05:16.158923 150525 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1101 09:05:16.158987  150525 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1101 09:05:16.159023  150525 kubeadm.go:319] OS: Linux
I1101 09:05:16.159070  150525 kubeadm.go:319] CGROUPS_CPU: enabled
I1101 09:05:16.159119  150525 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1101 09:05:16.159167  150525 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1101 09:05:16.159216  150525 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1101 09:05:16.159266  150525 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1101 09:05:16.159315  150525 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1101 09:05:16.159361  150525 kubeadm.go:319] CGROUPS_PIDS: enabled
I1101 09:05:16.159421  150525 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1101 09:05:16.159471  150525 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1101 09:05:16.229618 150525 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1101 09:05:16.229743 150525 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1101 09:05:16.229919 150525 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1101 09:05:16.235590 150525 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1101 09:05:16.241422 150525 out.go:252] - Generating certificates and keys ...
I1101 09:05:16.241561 150525 kubeadm.go:319] [certs] Using existing ca certificate authority
I1101 09:05:16.241636 150525 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1101 09:05:16.662772 150525 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1101 09:05:16.738312 150525 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1101 09:05:17.230766 150525 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1101 09:05:17.660732 150525 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1101 09:05:18.438847 150525 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1101 09:05:18.439134 150525 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-599041] and IPs [192.168.76.2 127.0.0.1 ::1]
I1101 09:05:18.944837 150525 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1101 09:05:18.945371 150525 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-599041] and IPs [192.168.76.2 127.0.0.1 ::1]
I1101 09:05:19.309344 150525 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1101 09:05:19.997441 150525 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1101 09:05:20.597177 150525 kubeadm.go:319] [certs] Generating "sa" key and public key
I1101 09:05:20.597490 150525 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1101 09:05:20.737984 150525 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1101 09:05:21.175021 150525 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1101 09:05:21.859631 150525 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1101 09:05:22.502694 150525 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1101 09:05:23.363292 150525 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1101 09:05:23.364080 150525 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1101 09:05:23.366823 150525 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1101 09:05:23.370326 150525 out.go:252] - Booting up control plane ...
I1101 09:05:23.370431 150525 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1101 09:05:23.370511 150525 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1101 09:05:23.370590 150525 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1101 09:05:23.403428 150525 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1101 09:05:23.403534 150525 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1101 09:05:23.411463 150525 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1101 09:05:23.411971 150525 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1101 09:05:23.412177 150525 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1101 09:05:23.540975 150525 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1101 09:05:23.541092 150525 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1101 09:05:25.042890 150525 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.502180396s
I1101 09:05:25.047290 150525 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1101 09:05:25.047403 150525 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.76.2:8443/livez
I1101 09:05:25.047502 150525 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1101 09:05:25.047591 150525 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1101 09:05:29.218974 150525 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 4.171819339s
I1101 09:05:30.709511 150525 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 5.662647809s
I1101 09:05:32.048443 150525 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 7.001434313s
I1101 09:05:32.072586 150525 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1101 09:05:32.096469 150525 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1101 09:05:32.126358 150525 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1101 09:05:32.126563 150525 kubeadm.go:319] [mark-control-plane] Marking the node scheduled-stop-599041 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1101 09:05:32.140442 150525 kubeadm.go:319] [bootstrap-token] Using token: 5obfx4.nkvkc2gyi15s7edh
I1101 09:05:32.143315 150525 out.go:252] - Configuring RBAC rules ...
I1101 09:05:32.143447 150525 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1101 09:05:32.149029 150525 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1101 09:05:32.157334 150525 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1101 09:05:32.166450 150525 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1101 09:05:32.172922 150525 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1101 09:05:32.177326 150525 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1101 09:05:32.457138 150525 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1101 09:05:32.882068 150525 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1101 09:05:33.455462 150525 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1101 09:05:33.456433 150525 kubeadm.go:319]
I1101 09:05:33.456515 150525 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1101 09:05:33.456520 150525 kubeadm.go:319]
I1101 09:05:33.456599 150525 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1101 09:05:33.456603 150525 kubeadm.go:319]
I1101 09:05:33.456628 150525 kubeadm.go:319] mkdir -p $HOME/.kube
I1101 09:05:33.456689 150525 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1101 09:05:33.456741 150525 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1101 09:05:33.456744 150525 kubeadm.go:319]
I1101 09:05:33.456800 150525 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1101 09:05:33.456803 150525 kubeadm.go:319]
I1101 09:05:33.456858 150525 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1101 09:05:33.456881 150525 kubeadm.go:319]
I1101 09:05:33.456935 150525 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1101 09:05:33.457011 150525 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1101 09:05:33.457082 150525 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1101 09:05:33.457085 150525 kubeadm.go:319]
I1101 09:05:33.457180 150525 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1101 09:05:33.457271 150525 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1101 09:05:33.457275 150525 kubeadm.go:319]
I1101 09:05:33.457361 150525 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5obfx4.nkvkc2gyi15s7edh \
I1101 09:05:33.457467 150525 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:8e6f9f2b3de173f1f4e906e50c50de3f7183de6384d3ec0b8a8e2be0c3eae33b \
I1101 09:05:33.457487 150525 kubeadm.go:319] --control-plane
I1101 09:05:33.457490 150525 kubeadm.go:319]
I1101 09:05:33.457578 150525 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1101 09:05:33.457581 150525 kubeadm.go:319]
I1101 09:05:33.457665 150525 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5obfx4.nkvkc2gyi15s7edh \
I1101 09:05:33.457798 150525 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:8e6f9f2b3de173f1f4e906e50c50de3f7183de6384d3ec0b8a8e2be0c3eae33b
I1101 09:05:33.462457 150525 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I1101 09:05:33.462698 150525 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1101 09:05:33.462810 150525 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1101 09:05:33.462826 150525 cni.go:84] Creating CNI manager for ""
I1101 09:05:33.462833 150525 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1101 09:05:33.467781 150525 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1101 09:05:33.470616 150525 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1101 09:05:33.474565 150525 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1101 09:05:33.474576 150525 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1101 09:05:33.488979 150525 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1101 09:05:33.824883 150525 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1101 09:05:33.825036 150525 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1101 09:05:33.825109 150525 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-599041 minikube.k8s.io/updated_at=2025_11_01T09_05_33_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=21e20c7776311c6e29254646bf2620ea610dd192 minikube.k8s.io/name=scheduled-stop-599041 minikube.k8s.io/primary=true
I1101 09:05:34.029341 150525 kubeadm.go:1114] duration metric: took 204.36283ms to wait for elevateKubeSystemPrivileges
I1101 09:05:34.029367 150525 ops.go:34] apiserver oom_adj: -16
I1101 09:05:34.045242 150525 kubeadm.go:403] duration metric: took 18.06920126s to StartCluster
I1101 09:05:34.045266 150525 settings.go:142] acquiring lock: {Name:mkb61beb9c55121316e3b119291d0716c14c3a6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:34.045330 150525 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21835-2307/kubeconfig
I1101 09:05:34.046053 150525 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21835-2307/kubeconfig: {Name:mk1f500f846ffda8ad893dd2bff7271191c5c640 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 09:05:34.046276 150525 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1101 09:05:34.046381 150525 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1101 09:05:34.046632 150525 config.go:182] Loaded profile config "scheduled-stop-599041": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1101 09:05:34.046677 150525 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1101 09:05:34.046742 150525 addons.go:70] Setting storage-provisioner=true in profile "scheduled-stop-599041"
I1101 09:05:34.046755 150525 addons.go:239] Setting addon storage-provisioner=true in "scheduled-stop-599041"
I1101 09:05:34.046774 150525 host.go:66] Checking if "scheduled-stop-599041" exists ...
I1101 09:05:34.047269 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:34.047670 150525 addons.go:70] Setting default-storageclass=true in profile "scheduled-stop-599041"
I1101 09:05:34.047688 150525 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-599041"
I1101 09:05:34.048021 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:34.049623 150525 out.go:179] * Verifying Kubernetes components...
I1101 09:05:34.053964 150525 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 09:05:34.092690 150525 addons.go:239] Setting addon default-storageclass=true in "scheduled-stop-599041"
I1101 09:05:34.092718 150525 host.go:66] Checking if "scheduled-stop-599041" exists ...
I1101 09:05:34.093128 150525 cli_runner.go:164] Run: docker container inspect scheduled-stop-599041 --format={{.State.Status}}
I1101 09:05:34.096417 150525 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1101 09:05:34.099319 150525 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1101 09:05:34.099331 150525 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1101 09:05:34.099411 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:34.132785 150525 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1101 09:05:34.132798 150525 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1101 09:05:34.132863 150525 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-599041
I1101 09:05:34.160334 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:34.179238 150525 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/jenkins/minikube-integration/21835-2307/.minikube/machines/scheduled-stop-599041/id_rsa Username:docker}
I1101 09:05:34.280480 150525 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1101 09:05:34.335719 150525 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1101 09:05:34.394195 150525 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1101 09:05:34.406858 150525 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1101 09:05:34.630316 150525 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1101 09:05:34.632039 150525 api_server.go:52] waiting for apiserver process to appear ...
I1101 09:05:34.632088 150525 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1101 09:05:34.888367 150525 api_server.go:72] duration metric: took 842.06647ms to wait for apiserver process to appear ...
I1101 09:05:34.888378 150525 api_server.go:88] waiting for apiserver healthz status ...
I1101 09:05:34.888394 150525 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1101 09:05:34.891326 150525 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1101 09:05:34.894415 150525 addons.go:515] duration metric: took 847.713988ms for enable addons: enabled=[default-storageclass storage-provisioner]
I1101 09:05:34.900279 150525 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1101 09:05:34.901967 150525 api_server.go:141] control plane version: v1.34.1
I1101 09:05:34.901983 150525 api_server.go:131] duration metric: took 13.59965ms to wait for apiserver health ...
I1101 09:05:34.902003 150525 system_pods.go:43] waiting for kube-system pods to appear ...
I1101 09:05:34.905167 150525 system_pods.go:59] 5 kube-system pods found
I1101 09:05:34.905187 150525 system_pods.go:61] "etcd-scheduled-stop-599041" [df3d1822-dea0-4741-8999-bd3911262f6a] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1101 09:05:34.905195 150525 system_pods.go:61] "kube-apiserver-scheduled-stop-599041" [dea0d936-87c5-42c7-b69b-024bf9be659d] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1101 09:05:34.905201 150525 system_pods.go:61] "kube-controller-manager-scheduled-stop-599041" [03160410-5e30-4035-8cc2-46591d93ec33] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1101 09:05:34.905208 150525 system_pods.go:61] "kube-scheduler-scheduled-stop-599041" [b05c1749-b943-4824-8827-0da270b4a514] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1101 09:05:34.905212 150525 system_pods.go:61] "storage-provisioner" [046043f5-3b02-4b43-8874-284f01dcc398] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I1101 09:05:34.905218 150525 system_pods.go:74] duration metric: took 3.210013ms to wait for pod list to return data ...
I1101 09:05:34.905228 150525 kubeadm.go:587] duration metric: took 858.931526ms to wait for: map[apiserver:true system_pods:true]
I1101 09:05:34.905241 150525 node_conditions.go:102] verifying NodePressure condition ...
I1101 09:05:34.908007 150525 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1101 09:05:34.908025 150525 node_conditions.go:123] node cpu capacity is 2
I1101 09:05:34.908039 150525 node_conditions.go:105] duration metric: took 2.790946ms to run NodePressure ...
I1101 09:05:34.908051 150525 start.go:242] waiting for startup goroutines ...
I1101 09:05:35.134834 150525 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-599041" context rescaled to 1 replicas
I1101 09:05:35.134863 150525 start.go:247] waiting for cluster config update ...
I1101 09:05:35.134875 150525 start.go:256] writing updated cluster config ...
I1101 09:05:35.135190 150525 ssh_runner.go:195] Run: rm -f paused
I1101 09:05:35.205581 150525 start.go:628] kubectl: 1.33.2, cluster: 1.34.1 (minor skew: 1)
I1101 09:05:35.209195 150525 out.go:179] * Done! kubectl is now configured to use "scheduled-stop-599041" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
6eef76a9f5230 b5f57ec6b9867 11 seconds ago Running kube-scheduler 0 9b05b51c59fce kube-scheduler-scheduled-stop-599041 kube-system
fd502af6b3406 7eb2c6ff0c5a7 11 seconds ago Running kube-controller-manager 0 49087a6407c64 kube-controller-manager-scheduled-stop-599041 kube-system
ae43d133e1cd3 43911e833d64d 11 seconds ago Running kube-apiserver 0 f7da27c994162 kube-apiserver-scheduled-stop-599041 kube-system
2595eda52acf6 a1894772a478e 11 seconds ago Running etcd 0 8536336348a5a etcd-scheduled-stop-599041 kube-system
==> containerd <==
Nov 01 09:05:11 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:11.755934785Z" level=info msg="containerd successfully booted in 0.090085s"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.149916799Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:etcd-scheduled-stop-599041,Uid:88019947b935004d6bf205a7a549be36,Namespace:kube-system,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.153667693Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-apiserver-scheduled-stop-599041,Uid:449597b5207707a77f08f5708640cfbc,Namespace:kube-system,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.164484653Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-controller-manager-scheduled-stop-599041,Uid:c825b846f35a1ae5d6628cea7dc686d2,Namespace:kube-system,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.169737664Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-scheduler-scheduled-stop-599041,Uid:1247b3c5987decb90966f535d48db508,Namespace:kube-system,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.311261391Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:etcd-scheduled-stop-599041,Uid:88019947b935004d6bf205a7a549be36,Namespace:kube-system,Attempt:0,} returns sandbox id \"8536336348a5ab754e7c01132943f2acd93c042a9310df30615a2fe742b6b450\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.321605778Z" level=info msg="CreateContainer within sandbox \"8536336348a5ab754e7c01132943f2acd93c042a9310df30615a2fe742b6b450\" for container &ContainerMetadata{Name:etcd,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.329489390Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-apiserver-scheduled-stop-599041,Uid:449597b5207707a77f08f5708640cfbc,Namespace:kube-system,Attempt:0,} returns sandbox id \"f7da27c994162d03cd1ed27b6a973a40b64cc3186b0c775a36f7ceff4628fa9c\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.343340097Z" level=info msg="CreateContainer within sandbox \"f7da27c994162d03cd1ed27b6a973a40b64cc3186b0c775a36f7ceff4628fa9c\" for container &ContainerMetadata{Name:kube-apiserver,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.377471115Z" level=info msg="CreateContainer within sandbox \"8536336348a5ab754e7c01132943f2acd93c042a9310df30615a2fe742b6b450\" for &ContainerMetadata{Name:etcd,Attempt:0,} returns container id \"2595eda52acf6fda108c9a6599cf9e0e5e1d769bc8738a456bf000c1641ab539\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.378349953Z" level=info msg="StartContainer for \"2595eda52acf6fda108c9a6599cf9e0e5e1d769bc8738a456bf000c1641ab539\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.381549275Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-controller-manager-scheduled-stop-599041,Uid:c825b846f35a1ae5d6628cea7dc686d2,Namespace:kube-system,Attempt:0,} returns sandbox id \"49087a6407c64f0f3d20106bcfa6325d700df347064f8c3c530251d267beacbc\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.381805172Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-scheduler-scheduled-stop-599041,Uid:1247b3c5987decb90966f535d48db508,Namespace:kube-system,Attempt:0,} returns sandbox id \"9b05b51c59fce5beb7101d3d857c8dc991b00fefb57697697caf2cbce5bad94b\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.391112014Z" level=info msg="CreateContainer within sandbox \"49087a6407c64f0f3d20106bcfa6325d700df347064f8c3c530251d267beacbc\" for container &ContainerMetadata{Name:kube-controller-manager,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.397677322Z" level=info msg="CreateContainer within sandbox \"f7da27c994162d03cd1ed27b6a973a40b64cc3186b0c775a36f7ceff4628fa9c\" for &ContainerMetadata{Name:kube-apiserver,Attempt:0,} returns container id \"ae43d133e1cd3149dafd04b5318383a95b5024508647d37a3228a80d4bead3ca\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.398219935Z" level=info msg="CreateContainer within sandbox \"9b05b51c59fce5beb7101d3d857c8dc991b00fefb57697697caf2cbce5bad94b\" for container &ContainerMetadata{Name:kube-scheduler,Attempt:0,}"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.399201150Z" level=info msg="StartContainer for \"ae43d133e1cd3149dafd04b5318383a95b5024508647d37a3228a80d4bead3ca\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.420376602Z" level=info msg="CreateContainer within sandbox \"49087a6407c64f0f3d20106bcfa6325d700df347064f8c3c530251d267beacbc\" for &ContainerMetadata{Name:kube-controller-manager,Attempt:0,} returns container id \"fd502af6b34069b20b2f00a06001cf95d4761e59bdea5999510671d4ba8287a1\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.421136391Z" level=info msg="StartContainer for \"fd502af6b34069b20b2f00a06001cf95d4761e59bdea5999510671d4ba8287a1\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.425803253Z" level=info msg="CreateContainer within sandbox \"9b05b51c59fce5beb7101d3d857c8dc991b00fefb57697697caf2cbce5bad94b\" for &ContainerMetadata{Name:kube-scheduler,Attempt:0,} returns container id \"6eef76a9f5230755a79cfcbcfecd0d39220acc190b50d5b2f92976e8762ff4b2\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.426471151Z" level=info msg="StartContainer for \"6eef76a9f5230755a79cfcbcfecd0d39220acc190b50d5b2f92976e8762ff4b2\""
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.501804100Z" level=info msg="StartContainer for \"2595eda52acf6fda108c9a6599cf9e0e5e1d769bc8738a456bf000c1641ab539\" returns successfully"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.502617043Z" level=info msg="StartContainer for \"ae43d133e1cd3149dafd04b5318383a95b5024508647d37a3228a80d4bead3ca\" returns successfully"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.643211009Z" level=info msg="StartContainer for \"6eef76a9f5230755a79cfcbcfecd0d39220acc190b50d5b2f92976e8762ff4b2\" returns successfully"
Nov 01 09:05:25 scheduled-stop-599041 containerd[762]: time="2025-11-01T09:05:25.654447216Z" level=info msg="StartContainer for \"fd502af6b34069b20b2f00a06001cf95d4761e59bdea5999510671d4ba8287a1\" returns successfully"
==> describe nodes <==
Name: scheduled-stop-599041
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=scheduled-stop-599041
kubernetes.io/os=linux
minikube.k8s.io/commit=21e20c7776311c6e29254646bf2620ea610dd192
minikube.k8s.io/name=scheduled-stop-599041
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_01T09_05_33_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 01 Nov 2025 09:05:30 +0000
Taints: node.kubernetes.io/not-ready:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: scheduled-stop-599041
AcquireTime: <unset>
RenewTime: Sat, 01 Nov 2025 09:05:32 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 01 Nov 2025 09:05:33 +0000 Sat, 01 Nov 2025 09:05:26 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 01 Nov 2025 09:05:33 +0000 Sat, 01 Nov 2025 09:05:26 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 01 Nov 2025 09:05:33 +0000 Sat, 01 Nov 2025 09:05:26 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready False Sat, 01 Nov 2025 09:05:33 +0000 Sat, 01 Nov 2025 09:05:26 +0000 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
Addresses:
InternalIP: 192.168.76.2
Hostname: scheduled-stop-599041
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: ef38fbc8889a0e5f09e9dc0868f5cd19
System UUID: dc7fc40c-a18f-4e46-b20b-dd134b64d61a
Boot ID: a8ac8503-6b7a-4208-b896-162cdcafe81c
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.7.28
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (4 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system etcd-scheduled-stop-599041 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 4s
kube-system kube-apiserver-scheduled-stop-599041 250m (12%) 0 (0%) 0 (0%) 0 (0%) 4s
kube-system kube-controller-manager-scheduled-stop-599041 200m (10%) 0 (0%) 0 (0%) 0 (0%) 4s
kube-system kube-scheduler-scheduled-stop-599041 100m (5%) 0 (0%) 0 (0%) 0 (0%) 5s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 650m (32%) 0 (0%)
memory 100Mi (1%) 0 (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning CgroupV1 13s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 13s (x8 over 13s) kubelet Node scheduled-stop-599041 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 13s (x8 over 13s) kubelet Node scheduled-stop-599041 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 13s (x7 over 13s) kubelet Node scheduled-stop-599041 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 13s kubelet Updated Node Allocatable limit across pods
Normal Starting 5s kubelet Starting kubelet.
Warning CgroupV1 5s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 5s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 4s kubelet Node scheduled-stop-599041 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 4s kubelet Node scheduled-stop-599041 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 4s kubelet Node scheduled-stop-599041 status is now: NodeHasSufficientPID
Normal RegisteredNode 0s node-controller Node scheduled-stop-599041 event: Registered Node scheduled-stop-599041 in Controller
==> dmesg <==
[Nov 1 08:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014572] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.501039] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.033197] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.753566] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.779214] kauditd_printk_skb: 36 callbacks suppressed
[Nov 1 09:03] hrtimer: interrupt took 8309137 ns
==> etcd [2595eda52acf6fda108c9a6599cf9e0e5e1d769bc8738a456bf000c1641ab539] <==
{"level":"warn","ts":"2025-11-01T09:05:28.504288Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49744","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.521870Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49768","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.545853Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49780","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.559027Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49798","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.579739Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49812","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.599730Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49844","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.620283Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49864","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.638951Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49894","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.653341Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49910","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.695916Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49934","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.718079Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49952","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.755375Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49972","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.769939Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49986","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.793778Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49998","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.816868Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50006","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.827608Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50024","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.846556Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50050","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.862542Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50074","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.882481Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50090","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.896748Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50110","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.919531Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50120","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.951082Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50136","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:28.982567Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50146","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:29.001206Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50164","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T09:05:29.162329Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50182","server-name":"","error":"EOF"}
==> kernel <==
09:05:37 up 48 min, 0 user, load average: 1.60, 1.98, 2.27
Linux scheduled-stop-599041 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kube-apiserver [ae43d133e1cd3149dafd04b5318383a95b5024508647d37a3228a80d4bead3ca] <==
I1101 09:05:30.245297 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I1101 09:05:30.267405 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer"
I1101 09:05:30.275869 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]"
I1101 09:05:30.275909 1 policy_source.go:240] refreshing policies
E1101 09:05:30.304146 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
E1101 09:05:30.309116 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I1101 09:05:30.349351 1 controller.go:667] quota admission added evaluator for: namespaces
I1101 09:05:30.362819 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I1101 09:05:30.364347 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I1101 09:05:30.386152 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I1101 09:05:30.390566 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1101 09:05:30.521793 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I1101 09:05:30.918270 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1101 09:05:30.926534 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1101 09:05:30.926727 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1101 09:05:31.673066 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1101 09:05:31.729817 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1101 09:05:31.826901 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1101 09:05:31.837534 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1101 09:05:31.838830 1 controller.go:667] quota admission added evaluator for: endpoints
I1101 09:05:31.843911 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1101 09:05:31.965949 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I1101 09:05:32.866534 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1101 09:05:32.880088 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1101 09:05:32.892418 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
==> kube-controller-manager [fd502af6b34069b20b2f00a06001cf95d4761e59bdea5999510671d4ba8287a1] <==
I1101 09:05:37.021796 1 shared_informer.go:356] "Caches are synced" controller="node"
I1101 09:05:37.021999 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I1101 09:05:37.022082 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I1101 09:05:37.022131 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1101 09:05:37.022167 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1101 09:05:37.022647 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I1101 09:05:37.022790 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1101 09:05:37.022943 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1101 09:05:37.024122 1 shared_informer.go:356] "Caches are synced" controller="expand"
I1101 09:05:37.024258 1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
I1101 09:05:37.030213 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1101 09:05:37.048403 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="scheduled-stop-599041" podCIDRs=["10.244.0.0/24"]
I1101 09:05:37.049935 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1101 09:05:37.058585 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1101 09:05:37.061940 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1101 09:05:37.061958 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1101 09:05:37.061965 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1101 09:05:37.063343 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I1101 09:05:37.063408 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1101 09:05:37.063502 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1101 09:05:37.063561 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="scheduled-stop-599041"
I1101 09:05:37.063601 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1101 09:05:37.064180 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I1101 09:05:37.064468 1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
I1101 09:05:37.064486 1 shared_informer.go:356] "Caches are synced" controller="ephemeral"
==> kube-scheduler [6eef76a9f5230755a79cfcbcfecd0d39220acc190b50d5b2f92976e8762ff4b2] <==
I1101 09:05:30.686380 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1101 09:05:30.689451 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1101 09:05:30.689539 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1101 09:05:30.690631 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1101 09:05:30.689563 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E1101 09:05:30.694341 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
E1101 09:05:30.696877 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1101 09:05:30.697040 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1101 09:05:30.697135 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1101 09:05:30.697249 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1101 09:05:30.697342 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1101 09:05:30.697427 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1101 09:05:30.697503 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1101 09:05:30.697608 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1101 09:05:30.702323 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1101 09:05:30.706114 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1101 09:05:30.706332 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1101 09:05:30.706562 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1101 09:05:30.706721 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1101 09:05:30.706820 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1101 09:05:30.706904 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1101 09:05:30.706985 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1101 09:05:30.707078 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1101 09:05:30.707157 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
I1101 09:05:32.291602 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304112 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/1247b3c5987decb90966f535d48db508-kubeconfig\") pod \"kube-scheduler-scheduled-stop-599041\" (UID: \"1247b3c5987decb90966f535d48db508\") " pod="kube-system/kube-scheduler-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304130 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/c825b846f35a1ae5d6628cea7dc686d2-usr-local-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-599041\" (UID: \"c825b846f35a1ae5d6628cea7dc686d2\") " pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304149 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/c825b846f35a1ae5d6628cea7dc686d2-k8s-certs\") pod \"kube-controller-manager-scheduled-stop-599041\" (UID: \"c825b846f35a1ae5d6628cea7dc686d2\") " pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304167 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/c825b846f35a1ae5d6628cea7dc686d2-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-599041\" (UID: \"c825b846f35a1ae5d6628cea7dc686d2\") " pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304185 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/88019947b935004d6bf205a7a549be36-etcd-data\") pod \"etcd-scheduled-stop-599041\" (UID: \"88019947b935004d6bf205a7a549be36\") " pod="kube-system/etcd-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304202 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/449597b5207707a77f08f5708640cfbc-ca-certs\") pod \"kube-apiserver-scheduled-stop-599041\" (UID: \"449597b5207707a77f08f5708640cfbc\") " pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304220 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/449597b5207707a77f08f5708640cfbc-usr-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-599041\" (UID: \"449597b5207707a77f08f5708640cfbc\") " pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304236 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/c825b846f35a1ae5d6628cea7dc686d2-ca-certs\") pod \"kube-controller-manager-scheduled-stop-599041\" (UID: \"c825b846f35a1ae5d6628cea7dc686d2\") " pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304255 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/c825b846f35a1ae5d6628cea7dc686d2-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-599041\" (UID: \"c825b846f35a1ae5d6628cea7dc686d2\") " pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304278 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/449597b5207707a77f08f5708640cfbc-k8s-certs\") pod \"kube-apiserver-scheduled-stop-599041\" (UID: \"449597b5207707a77f08f5708640cfbc\") " pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.304303 1506 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/449597b5207707a77f08f5708640cfbc-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-599041\" (UID: \"449597b5207707a77f08f5708640cfbc\") " pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.770921 1506 apiserver.go:52] "Watching apiserver"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.802992 1506 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.826093 1506 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-599041" podStartSLOduration=0.826072133 podStartE2EDuration="826.072133ms" podCreationTimestamp="2025-11-01 09:05:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 09:05:33.806186416 +0000 UTC m=+1.112336859" watchObservedRunningTime="2025-11-01 09:05:33.826072133 +0000 UTC m=+1.132222568"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.861345 1506 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-599041" podStartSLOduration=0.861326436 podStartE2EDuration="861.326436ms" podCreationTimestamp="2025-11-01 09:05:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 09:05:33.826471728 +0000 UTC m=+1.132622335" watchObservedRunningTime="2025-11-01 09:05:33.861326436 +0000 UTC m=+1.167476871"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.874587 1506 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-599041" podStartSLOduration=0.874568568 podStartE2EDuration="874.568568ms" podCreationTimestamp="2025-11-01 09:05:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 09:05:33.862132666 +0000 UTC m=+1.168283109" watchObservedRunningTime="2025-11-01 09:05:33.874568568 +0000 UTC m=+1.180718994"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.889070 1506 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-599041" podStartSLOduration=1.889052054 podStartE2EDuration="1.889052054s" podCreationTimestamp="2025-11-01 09:05:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 09:05:33.874949734 +0000 UTC m=+1.181100169" watchObservedRunningTime="2025-11-01 09:05:33.889052054 +0000 UTC m=+1.195202489"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.893324 1506 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.896952 1506 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: I1101 09:05:33.901934 1506 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: E1101 09:05:33.906560 1506 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-599041\" already exists" pod="kube-system/etcd-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: E1101 09:05:33.915115 1506 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-scheduled-stop-599041\" already exists" pod="kube-system/kube-apiserver-scheduled-stop-599041"
Nov 01 09:05:33 scheduled-stop-599041 kubelet[1506]: E1101 09:05:33.915724 1506 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-scheduled-stop-599041\" already exists" pod="kube-system/kube-controller-manager-scheduled-stop-599041"
Nov 01 09:05:37 scheduled-stop-599041 kubelet[1506]: I1101 09:05:37.145990 1506 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 01 09:05:37 scheduled-stop-599041 kubelet[1506]: I1101 09:05:37.146790 1506 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p scheduled-stop-599041 -n scheduled-stop-599041
helpers_test.go:269: (dbg) Run: kubectl --context scheduled-stop-599041 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: storage-provisioner
helpers_test.go:282: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context scheduled-stop-599041 describe pod storage-provisioner
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context scheduled-stop-599041 describe pod storage-provisioner: exit status 1 (166.37193ms)
** stderr **
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:287: kubectl --context scheduled-stop-599041 describe pod storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "scheduled-stop-599041" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p scheduled-stop-599041
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-599041: (2.227530369s)
--- FAIL: TestScheduledStopUnix (41.62s)