=== RUN TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-arm64 start -p scheduled-stop-995971 --memory=3072 --driver=docker --container-runtime=docker
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-995971 --memory=3072 --driver=docker --container-runtime=docker: (36.42142864s)
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-995971 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-995971 -n scheduled-stop-995971
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-995971 --schedule 15s
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:98: process 208538 running but should have been killed on reschedule of stop
panic.go:636: *** TestScheduledStopUnix FAILED at 2025-10-26 08:26:26.19146897 +0000 UTC m=+2334.258792947
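For context on the failure above: the check at scheduled_stop_test.go:98 expects that issuing a new `minikube stop --schedule` kills the daemonized process left behind by the previous schedule, so a surviving process such as 208538 after the 5m -> 15s reschedule is treated as a failure. The following is a minimal sketch of that reschedule-and-check flow, not the actual test code; how the old scheduled-stop PID is obtained is an assumption here (hardcoded for illustration).

// Illustrative sketch only: mirrors the reschedule check described above,
// not the real scheduled_stop_test.go helpers. The way minikube records the
// previously scheduled stop PID is assumed, and hardcoded below.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// processAlive reports whether a PID still exists by sending signal 0.
func processAlive(pid int) bool {
	return syscall.Kill(pid, syscall.Signal(0)) == nil
}

// run invokes the minikube binary used in this job and echoes its output.
func run(args ...string) error {
	out, err := exec.Command("out/minikube-linux-arm64", args...).CombinedOutput()
	fmt.Printf("$ minikube %v\n%s", args, out)
	return err
}

func main() {
	profile := "scheduled-stop-995971"

	// Schedule a stop 5 minutes out, then reschedule with a 15s delay.
	_ = run("stop", "-p", profile, "--schedule", "5m")
	oldPID := 208538 // hypothetical: the real test reads this from minikube's scheduled-stop state

	_ = run("stop", "-p", profile, "--schedule", "15s")

	// Expectation under test: rescheduling kills the previously scheduled stop process.
	if processAlive(oldPID) {
		fmt.Printf("process %d running but should have been killed on reschedule of stop\n", oldPID)
	}
}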
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestScheduledStopUnix]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect scheduled-stop-995971
helpers_test.go:243: (dbg) docker inspect scheduled-stop-995971:
-- stdout --
[
{
"Id": "159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c",
"Created": "2025-10-26T08:25:54.265640074Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 205731,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-10-26T08:25:54.341846041Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:1b8004df0b408966a254b2ecd4551aa85aaac4627e7e9cb1cefc14dfe51ec273",
"ResolvConfPath": "/var/lib/docker/containers/159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c/hostname",
"HostsPath": "/var/lib/docker/containers/159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c/hosts",
"LogPath": "/var/lib/docker/containers/159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c/159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c-json.log",
"Name": "/scheduled-stop-995971",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"scheduled-stop-995971:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "scheduled-stop-995971",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "159355d3a8af925b907c76645746e5427014159dd18acc9da863845a74bc1f6c",
"LowerDir": "/var/lib/docker/overlay2/70510743380afbfb0b331712027b38b1559e7fa2995d4b8d4ebfee22f29db570-init/diff:/var/lib/docker/overlay2/f4ae9e34d51cdfacd2f648aed0156903f5945b817594903fbc53d2aaaff32c64/diff",
"MergedDir": "/var/lib/docker/overlay2/70510743380afbfb0b331712027b38b1559e7fa2995d4b8d4ebfee22f29db570/merged",
"UpperDir": "/var/lib/docker/overlay2/70510743380afbfb0b331712027b38b1559e7fa2995d4b8d4ebfee22f29db570/diff",
"WorkDir": "/var/lib/docker/overlay2/70510743380afbfb0b331712027b38b1559e7fa2995d4b8d4ebfee22f29db570/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "scheduled-stop-995971",
"Source": "/var/lib/docker/volumes/scheduled-stop-995971/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "scheduled-stop-995971",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "scheduled-stop-995971",
"name.minikube.sigs.k8s.io": "scheduled-stop-995971",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "334da74288b2fa6fe2a4681eaee5740fb84beb5ebe3d8cae5d8c60358c834d81",
"SandboxKey": "/var/run/docker/netns/334da74288b2",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32968"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32969"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32972"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32970"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32971"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"scheduled-stop-995971": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "9a:7c:69:be:f6:3f",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "d4cc5b4a245ac169df806a99c4ff7c1782eea77349c2efbc0787bc41de88ef6d",
"EndpointID": "836abcc4bb23bfe183204dcda9be9b597caf0f34c454f4d51b3ad6c22be38b21",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"scheduled-stop-995971",
"159355d3a8af"
]
}
}
}
}
]
-- /stdout --
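The inspect output above shows the container still in State.Status "running" with its 127.0.0.1 port bindings intact (e.g. 22/tcp published on 32968), consistent with the scheduled stop never firing. A small sketch of pulling those two fields out of `docker inspect` JSON follows; it assumes only the standard docker CLI and encoding/json, with struct fields mirroring the output above, and is not part of the minikube test helpers.

// Sketch: extract State.Status and the published 22/tcp host port from
// `docker inspect <name>`, matching the JSON structure shown above.
// Assumes the docker CLI is on PATH.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

type inspectEntry struct {
	State struct {
		Status  string `json:"Status"`
		Running bool   `json:"Running"`
	} `json:"State"`
	NetworkSettings struct {
		Ports map[string][]struct {
			HostIp   string `json:"HostIp"`
			HostPort string `json:"HostPort"`
		} `json:"Ports"`
	} `json:"NetworkSettings"`
}

func main() {
	out, err := exec.Command("docker", "inspect", "scheduled-stop-995971").Output()
	if err != nil {
		log.Fatal(err)
	}
	var entries []inspectEntry // docker inspect prints a JSON array
	if err := json.Unmarshal(out, &entries); err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println("status:", e.State.Status, "running:", e.State.Running)
		for _, b := range e.NetworkSettings.Ports["22/tcp"] {
			fmt.Printf("ssh published on %s:%s\n", b.HostIp, b.HostPort)
		}
	}
}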
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-995971 -n scheduled-stop-995971
helpers_test.go:252: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestScheduledStopUnix]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p scheduled-stop-995971 logs -n 25
helpers_test.go:260: TestScheduledStopUnix logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ stop │ -p multinode-471242 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:20 UTC │ 26 Oct 25 08:20 UTC │
│ start │ -p multinode-471242 --wait=true -v=5 --alsologtostderr │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:20 UTC │ 26 Oct 25 08:21 UTC │
│ node │ list -p multinode-471242 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:21 UTC │ │
│ node │ multinode-471242 node delete m03 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:21 UTC │ 26 Oct 25 08:21 UTC │
│ stop │ multinode-471242 stop │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:21 UTC │ 26 Oct 25 08:21 UTC │
│ start │ -p multinode-471242 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=docker │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:21 UTC │ 26 Oct 25 08:22 UTC │
│ node │ list -p multinode-471242 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:22 UTC │ │
│ start │ -p multinode-471242-m02 --driver=docker --container-runtime=docker │ multinode-471242-m02 │ jenkins │ v1.37.0 │ 26 Oct 25 08:22 UTC │ │
│ start │ -p multinode-471242-m03 --driver=docker --container-runtime=docker │ multinode-471242-m03 │ jenkins │ v1.37.0 │ 26 Oct 25 08:22 UTC │ 26 Oct 25 08:23 UTC │
│ node │ add -p multinode-471242 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:23 UTC │ │
│ delete │ -p multinode-471242-m03 │ multinode-471242-m03 │ jenkins │ v1.37.0 │ 26 Oct 25 08:23 UTC │ 26 Oct 25 08:23 UTC │
│ delete │ -p multinode-471242 │ multinode-471242 │ jenkins │ v1.37.0 │ 26 Oct 25 08:23 UTC │ 26 Oct 25 08:23 UTC │
│ start │ -p test-preload-930835 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0 │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:23 UTC │ 26 Oct 25 08:24 UTC │
│ image │ test-preload-930835 image pull gcr.io/k8s-minikube/busybox │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:24 UTC │ 26 Oct 25 08:24 UTC │
│ stop │ -p test-preload-930835 │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:24 UTC │ 26 Oct 25 08:24 UTC │
│ start │ -p test-preload-930835 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:24 UTC │ 26 Oct 25 08:25 UTC │
│ image │ test-preload-930835 image list │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:25 UTC │ 26 Oct 25 08:25 UTC │
│ delete │ -p test-preload-930835 │ test-preload-930835 │ jenkins │ v1.37.0 │ 26 Oct 25 08:25 UTC │ 26 Oct 25 08:25 UTC │
│ start │ -p scheduled-stop-995971 --memory=3072 --driver=docker --container-runtime=docker │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:25 UTC │ 26 Oct 25 08:26 UTC │
│ stop │ -p scheduled-stop-995971 --schedule 5m │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
│ stop │ -p scheduled-stop-995971 --schedule 5m │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
│ stop │ -p scheduled-stop-995971 --schedule 5m │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
│ stop │ -p scheduled-stop-995971 --schedule 15s │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
│ stop │ -p scheduled-stop-995971 --schedule 15s │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
│ stop │ -p scheduled-stop-995971 --schedule 15s │ scheduled-stop-995971 │ jenkins │ v1.37.0 │ 26 Oct 25 08:26 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/10/26 08:25:49
Running on machine: ip-172-31-30-239
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1026 08:25:49.288464 205345 out.go:360] Setting OutFile to fd 1 ...
I1026 08:25:49.288592 205345 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1026 08:25:49.288596 205345 out.go:374] Setting ErrFile to fd 2...
I1026 08:25:49.288600 205345 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1026 08:25:49.288957 205345 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21772-2316/.minikube/bin
I1026 08:25:49.289422 205345 out.go:368] Setting JSON to false
I1026 08:25:49.290389 205345 start.go:131] hostinfo: {"hostname":"ip-172-31-30-239","uptime":4102,"bootTime":1761463048,"procs":153,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
I1026 08:25:49.290441 205345 start.go:141] virtualization:
I1026 08:25:49.294373 205345 out.go:179] * [scheduled-stop-995971] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1026 08:25:49.298851 205345 out.go:179] - MINIKUBE_LOCATION=21772
I1026 08:25:49.298960 205345 notify.go:220] Checking for updates...
I1026 08:25:49.305841 205345 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1026 08:25:49.309139 205345 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21772-2316/kubeconfig
I1026 08:25:49.312494 205345 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21772-2316/.minikube
I1026 08:25:49.315726 205345 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1026 08:25:49.318854 205345 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1026 08:25:49.322144 205345 driver.go:421] Setting default libvirt URI to qemu:///system
I1026 08:25:49.359337 205345 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I1026 08:25:49.359467 205345 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1026 08:25:49.412964 205345 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:42 SystemTime:2025-10-26 08:25:49.403797509 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1026 08:25:49.413058 205345 docker.go:318] overlay module found
I1026 08:25:49.416380 205345 out.go:179] * Using the docker driver based on user configuration
I1026 08:25:49.419384 205345 start.go:305] selected driver: docker
I1026 08:25:49.419392 205345 start.go:925] validating driver "docker" against <nil>
I1026 08:25:49.419404 205345 start.go:936] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1026 08:25:49.420213 205345 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1026 08:25:49.485642 205345 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:42 SystemTime:2025-10-26 08:25:49.475690158 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1026 08:25:49.485798 205345 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1026 08:25:49.486021 205345 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I1026 08:25:49.489054 205345 out.go:179] * Using Docker driver with root privileges
I1026 08:25:49.491926 205345 cni.go:84] Creating CNI manager for ""
I1026 08:25:49.491990 205345 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1026 08:25:49.491998 205345 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I1026 08:25:49.492070 205345 start.go:349] cluster config:
{Name:scheduled-stop-995971 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-995971 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1026 08:25:49.495135 205345 out.go:179] * Starting "scheduled-stop-995971" primary control-plane node in "scheduled-stop-995971" cluster
I1026 08:25:49.497977 205345 cache.go:123] Beginning downloading kic base image for docker with docker
I1026 08:25:49.500983 205345 out.go:179] * Pulling base image v0.0.48-1760939008-21773 ...
I1026 08:25:49.503883 205345 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1026 08:25:49.503932 205345 preload.go:198] Found local preload: /home/jenkins/minikube-integration/21772-2316/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4
I1026 08:25:49.503939 205345 cache.go:58] Caching tarball of preloaded images
I1026 08:25:49.503976 205345 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon
I1026 08:25:49.504036 205345 preload.go:233] Found /home/jenkins/minikube-integration/21772-2316/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1026 08:25:49.504045 205345 cache.go:61] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1026 08:25:49.504391 205345 profile.go:143] Saving config to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/config.json ...
I1026 08:25:49.504408 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/config.json: {Name:mk63180053288e2fe6d92f91c6dcd1aa840ab352 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:25:49.523622 205345 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon, skipping pull
I1026 08:25:49.523634 205345 cache.go:147] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 exists in daemon, skipping load
I1026 08:25:49.523655 205345 cache.go:232] Successfully downloaded all kic artifacts
I1026 08:25:49.523677 205345 start.go:360] acquireMachinesLock for scheduled-stop-995971: {Name:mkb74e4e2b5f7a6fd08e09d6fc032d99d14b3518 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1026 08:25:49.523793 205345 start.go:364] duration metric: took 99.736µs to acquireMachinesLock for "scheduled-stop-995971"
I1026 08:25:49.523830 205345 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-995971 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-995971 Namespace:default APIServerHAVIP: A
PIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHA
uthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1026 08:25:49.523887 205345 start.go:125] createHost starting for "" (driver="docker")
I1026 08:25:49.527425 205345 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1026 08:25:49.527688 205345 start.go:159] libmachine.API.Create for "scheduled-stop-995971" (driver="docker")
I1026 08:25:49.527733 205345 client.go:168] LocalClient.Create starting
I1026 08:25:49.527805 205345 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem
I1026 08:25:49.527838 205345 main.go:141] libmachine: Decoding PEM data...
I1026 08:25:49.527850 205345 main.go:141] libmachine: Parsing certificate...
I1026 08:25:49.527904 205345 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21772-2316/.minikube/certs/cert.pem
I1026 08:25:49.527919 205345 main.go:141] libmachine: Decoding PEM data...
I1026 08:25:49.527928 205345 main.go:141] libmachine: Parsing certificate...
I1026 08:25:49.528300 205345 cli_runner.go:164] Run: docker network inspect scheduled-stop-995971 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1026 08:25:49.543413 205345 cli_runner.go:211] docker network inspect scheduled-stop-995971 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1026 08:25:49.543500 205345 network_create.go:284] running [docker network inspect scheduled-stop-995971] to gather additional debugging logs...
I1026 08:25:49.543514 205345 cli_runner.go:164] Run: docker network inspect scheduled-stop-995971
W1026 08:25:49.560152 205345 cli_runner.go:211] docker network inspect scheduled-stop-995971 returned with exit code 1
I1026 08:25:49.560172 205345 network_create.go:287] error running [docker network inspect scheduled-stop-995971]: docker network inspect scheduled-stop-995971: exit status 1
stdout:
[]
stderr:
Error response from daemon: network scheduled-stop-995971 not found
I1026 08:25:49.560184 205345 network_create.go:289] output of [docker network inspect scheduled-stop-995971]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network scheduled-stop-995971 not found
** /stderr **
I1026 08:25:49.560301 205345 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1026 08:25:49.576681 205345 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-64224559a2d4 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:3a:0f:fc:ee:33:e6} reservation:<nil>}
I1026 08:25:49.576927 205345 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-e9ea4208913f IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:1e:75:af:5d:2f:8d} reservation:<nil>}
I1026 08:25:49.577200 205345 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-efc95b6a0c4e IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:5e:94:b4:e8:58:06} reservation:<nil>}
I1026 08:25:49.577527 205345 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a041d0}
I1026 08:25:49.577543 205345 network_create.go:124] attempt to create docker network scheduled-stop-995971 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1026 08:25:49.577603 205345 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-995971 scheduled-stop-995971
I1026 08:25:49.645792 205345 network_create.go:108] docker network scheduled-stop-995971 192.168.76.0/24 created
I1026 08:25:49.645814 205345 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-995971" container
I1026 08:25:49.645885 205345 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1026 08:25:49.661875 205345 cli_runner.go:164] Run: docker volume create scheduled-stop-995971 --label name.minikube.sigs.k8s.io=scheduled-stop-995971 --label created_by.minikube.sigs.k8s.io=true
I1026 08:25:49.680007 205345 oci.go:103] Successfully created a docker volume scheduled-stop-995971
I1026 08:25:49.680093 205345 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-995971-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-995971 --entrypoint /usr/bin/test -v scheduled-stop-995971:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -d /var/lib
I1026 08:25:50.162863 205345 oci.go:107] Successfully prepared a docker volume scheduled-stop-995971
I1026 08:25:50.162901 205345 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1026 08:25:50.162919 205345 kic.go:194] Starting extracting preloaded images to volume ...
I1026 08:25:50.162981 205345 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21772-2316/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-995971:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir
I1026 08:25:54.193393 205345 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21772-2316/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-995971:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir: (4.030377948s)
I1026 08:25:54.193421 205345 kic.go:203] duration metric: took 4.030489891s to extract preloaded images to volume ...
W1026 08:25:54.193581 205345 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1026 08:25:54.193684 205345 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1026 08:25:54.250417 205345 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-995971 --name scheduled-stop-995971 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-995971 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-995971 --network scheduled-stop-995971 --ip 192.168.76.2 --volume scheduled-stop-995971:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8
I1026 08:25:54.563375 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Running}}
I1026 08:25:54.592348 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:25:54.617838 205345 cli_runner.go:164] Run: docker exec scheduled-stop-995971 stat /var/lib/dpkg/alternatives/iptables
I1026 08:25:54.672704 205345 oci.go:144] the created container "scheduled-stop-995971" has a running status.
I1026 08:25:54.672729 205345 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa...
I1026 08:25:54.945815 205345 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1026 08:25:54.980548 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:25:54.999861 205345 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1026 08:25:54.999872 205345 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-995971 chown docker:docker /home/docker/.ssh/authorized_keys]
I1026 08:25:55.065852 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:25:55.084442 205345 machine.go:93] provisionDockerMachine start ...
I1026 08:25:55.084544 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:25:55.102118 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:25:55.102446 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:25:55.102453 205345 main.go:141] libmachine: About to run SSH command:
hostname
I1026 08:25:55.103091 205345 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1026 08:25:58.255389 205345 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-995971
I1026 08:25:58.255403 205345 ubuntu.go:182] provisioning hostname "scheduled-stop-995971"
I1026 08:25:58.255497 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:25:58.273785 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:25:58.274087 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:25:58.274097 205345 main.go:141] libmachine: About to run SSH command:
sudo hostname scheduled-stop-995971 && echo "scheduled-stop-995971" | sudo tee /etc/hostname
I1026 08:25:58.432942 205345 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-995971
I1026 08:25:58.433029 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:25:58.450401 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:25:58.450714 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:25:58.450729 205345 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sscheduled-stop-995971' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-995971/g' /etc/hosts;
else
echo '127.0.1.1 scheduled-stop-995971' | sudo tee -a /etc/hosts;
fi
fi
I1026 08:25:58.603954 205345 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1026 08:25:58.603968 205345 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21772-2316/.minikube CaCertPath:/home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21772-2316/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21772-2316/.minikube}
I1026 08:25:58.603994 205345 ubuntu.go:190] setting up certificates
I1026 08:25:58.604003 205345 provision.go:84] configureAuth start
I1026 08:25:58.604061 205345 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-995971
I1026 08:25:58.621292 205345 provision.go:143] copyHostCerts
I1026 08:25:58.621349 205345 exec_runner.go:144] found /home/jenkins/minikube-integration/21772-2316/.minikube/key.pem, removing ...
I1026 08:25:58.621356 205345 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21772-2316/.minikube/key.pem
I1026 08:25:58.621437 205345 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21772-2316/.minikube/key.pem (1679 bytes)
I1026 08:25:58.621520 205345 exec_runner.go:144] found /home/jenkins/minikube-integration/21772-2316/.minikube/ca.pem, removing ...
I1026 08:25:58.621524 205345 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21772-2316/.minikube/ca.pem
I1026 08:25:58.621548 205345 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21772-2316/.minikube/ca.pem (1082 bytes)
I1026 08:25:58.621596 205345 exec_runner.go:144] found /home/jenkins/minikube-integration/21772-2316/.minikube/cert.pem, removing ...
I1026 08:25:58.621599 205345 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21772-2316/.minikube/cert.pem
I1026 08:25:58.621619 205345 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21772-2316/.minikube/cert.pem (1123 bytes)
I1026 08:25:58.621661 205345 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21772-2316/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca-key.pem org=jenkins.scheduled-stop-995971 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-995971]
I1026 08:25:59.757601 205345 provision.go:177] copyRemoteCerts
I1026 08:25:59.757654 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1026 08:25:59.757693 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:25:59.775363 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:25:59.879410 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1026 08:25:59.897019 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/machines/server.pem --> /etc/docker/server.pem (1229 bytes)
I1026 08:25:59.914872 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1026 08:25:59.932259 205345 provision.go:87] duration metric: took 1.328234423s to configureAuth
I1026 08:25:59.932275 205345 ubuntu.go:206] setting minikube options for container-runtime
I1026 08:25:59.932466 205345 config.go:182] Loaded profile config "scheduled-stop-995971": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1026 08:25:59.932514 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:25:59.948964 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:25:59.949275 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:25:59.949281 205345 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1026 08:26:00.139012 205345 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I1026 08:26:00.139024 205345 ubuntu.go:71] root file system type: overlay
I1026 08:26:00.139283 205345 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1026 08:26:00.139362 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:00.214506 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:26:00.214842 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:26:00.214930 205345 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1026 08:26:00.507157 205345 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1026 08:26:00.507232 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:00.528013 205345 main.go:141] libmachine: Using SSH client type: native
I1026 08:26:00.528347 205345 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 32968 <nil> <nil>}
I1026 08:26:00.528364 205345 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1026 08:26:01.498850 205345 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-10-08 12:17:10.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-10-26 08:26:00.499763495 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I1026 08:26:01.498871 205345 machine.go:96] duration metric: took 6.414418621s to provisionDockerMachine
I1026 08:26:01.498881 205345 client.go:171] duration metric: took 11.971143911s to LocalClient.Create
I1026 08:26:01.498894 205345 start.go:167] duration metric: took 11.97120646s to libmachine.API.Create "scheduled-stop-995971"
I1026 08:26:01.498899 205345 start.go:293] postStartSetup for "scheduled-stop-995971" (driver="docker")
I1026 08:26:01.498908 205345 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1026 08:26:01.499028 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1026 08:26:01.499069 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:01.517484 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:01.624050 205345 ssh_runner.go:195] Run: cat /etc/os-release
I1026 08:26:01.627832 205345 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1026 08:26:01.627855 205345 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1026 08:26:01.627866 205345 filesync.go:126] Scanning /home/jenkins/minikube-integration/21772-2316/.minikube/addons for local assets ...
I1026 08:26:01.627927 205345 filesync.go:126] Scanning /home/jenkins/minikube-integration/21772-2316/.minikube/files for local assets ...
I1026 08:26:01.628018 205345 filesync.go:149] local asset: /home/jenkins/minikube-integration/21772-2316/.minikube/files/etc/ssl/certs/41132.pem -> 41132.pem in /etc/ssl/certs
I1026 08:26:01.628125 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1026 08:26:01.636510 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/files/etc/ssl/certs/41132.pem --> /etc/ssl/certs/41132.pem (1708 bytes)
I1026 08:26:01.655221 205345 start.go:296] duration metric: took 156.308093ms for postStartSetup
I1026 08:26:01.655637 205345 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-995971
I1026 08:26:01.674005 205345 profile.go:143] Saving config to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/config.json ...
I1026 08:26:01.674340 205345 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1026 08:26:01.674379 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:01.693160 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:01.792948 205345 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1026 08:26:01.797730 205345 start.go:128] duration metric: took 12.273830646s to createHost
I1026 08:26:01.797753 205345 start.go:83] releasing machines lock for "scheduled-stop-995971", held for 12.273945009s
I1026 08:26:01.797823 205345 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-995971
I1026 08:26:01.814587 205345 ssh_runner.go:195] Run: cat /version.json
I1026 08:26:01.814639 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:01.814908 205345 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1026 08:26:01.814949 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:01.832438 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:01.837574 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:02.019683 205345 ssh_runner.go:195] Run: systemctl --version
I1026 08:26:02.026054 205345 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1026 08:26:02.031690 205345 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1026 08:26:02.031780 205345 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1026 08:26:02.063098 205345 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1026 08:26:02.063113 205345 start.go:495] detecting cgroup driver to use...
I1026 08:26:02.063144 205345 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1026 08:26:02.063249 205345 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1026 08:26:02.077484 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1026 08:26:02.086410 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1026 08:26:02.095484 205345 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1026 08:26:02.095556 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1026 08:26:02.104526 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1026 08:26:02.113699 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1026 08:26:02.122675 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1026 08:26:02.131469 205345 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1026 08:26:02.139831 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1026 08:26:02.149577 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1026 08:26:02.158366 205345 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1026 08:26:02.167127 205345 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1026 08:26:02.174877 205345 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1026 08:26:02.182363 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:02.302418 205345 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1026 08:26:02.404117 205345 start.go:495] detecting cgroup driver to use...
I1026 08:26:02.404154 205345 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1026 08:26:02.404202 205345 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1026 08:26:02.418113 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1026 08:26:02.431893 205345 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1026 08:26:02.460578 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1026 08:26:02.473672 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1026 08:26:02.487229 205345 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1026 08:26:02.502357 205345 ssh_runner.go:195] Run: which cri-dockerd
I1026 08:26:02.506211 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1026 08:26:02.513949 205345 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1026 08:26:02.532575 205345 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1026 08:26:02.656630 205345 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1026 08:26:02.778566 205345 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1026 08:26:02.778679 205345 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1026 08:26:02.793424 205345 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1026 08:26:02.806558 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:02.927258 205345 ssh_runner.go:195] Run: sudo systemctl restart docker
I1026 08:26:03.310873 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1026 08:26:03.323734 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1026 08:26:03.337490 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1026 08:26:03.351184 205345 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1026 08:26:03.474537 205345 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1026 08:26:03.593820 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:03.712583 205345 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1026 08:26:03.727740 205345 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1026 08:26:03.740851 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:03.861723 205345 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1026 08:26:03.930054 205345 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1026 08:26:03.944278 205345 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1026 08:26:03.944340 205345 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1026 08:26:03.948364 205345 start.go:563] Will wait 60s for crictl version
I1026 08:26:03.948421 205345 ssh_runner.go:195] Run: which crictl
I1026 08:26:03.952061 205345 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1026 08:26:03.977080 205345 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1026 08:26:03.977140 205345 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1026 08:26:04.000133 205345 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1026 08:26:04.029442 205345 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1026 08:26:04.029555 205345 cli_runner.go:164] Run: docker network inspect scheduled-stop-995971 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1026 08:26:04.048501 205345 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1026 08:26:04.052722 205345 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1026 08:26:04.063045 205345 kubeadm.go:883] updating cluster {Name:scheduled-stop-995971 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-995971 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1026 08:26:04.063148 205345 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1026 08:26:04.063201 205345 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1026 08:26:04.082811 205345 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1026 08:26:04.082824 205345 docker.go:621] Images already preloaded, skipping extraction
I1026 08:26:04.082892 205345 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1026 08:26:04.102051 205345 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1026 08:26:04.102072 205345 cache_images.go:85] Images are preloaded, skipping loading
I1026 08:26:04.102082 205345 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.34.1 docker true true} ...
I1026 08:26:04.102218 205345 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-995971 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-995971 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1026 08:26:04.102284 205345 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1026 08:26:04.152486 205345 cni.go:84] Creating CNI manager for ""
I1026 08:26:04.152503 205345 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1026 08:26:04.152520 205345 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1026 08:26:04.152547 205345 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-995971 NodeName:scheduled-stop-995971 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1026 08:26:04.152669 205345 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "scheduled-stop-995971"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1026 08:26:04.152733 205345 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1026 08:26:04.160952 205345 binaries.go:44] Found k8s binaries, skipping transfer
I1026 08:26:04.161017 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1026 08:26:04.168930 205345 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I1026 08:26:04.181310 205345 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1026 08:26:04.194398 205345 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2222 bytes)
I1026 08:26:04.206909 205345 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1026 08:26:04.210515 205345 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1026 08:26:04.220138 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:04.330112 205345 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1026 08:26:04.347272 205345 certs.go:69] Setting up /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971 for IP: 192.168.76.2
I1026 08:26:04.347282 205345 certs.go:195] generating shared ca certs ...
I1026 08:26:04.347296 205345 certs.go:227] acquiring lock for ca certs: {Name:mk9e6ca08351429d616a577a4b67a8e1d9983347 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:04.347429 205345 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21772-2316/.minikube/ca.key
I1026 08:26:04.347479 205345 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21772-2316/.minikube/proxy-client-ca.key
I1026 08:26:04.347485 205345 certs.go:257] generating profile certs ...
I1026 08:26:04.347537 205345 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.key
I1026 08:26:04.347554 205345 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.crt with IP's: []
I1026 08:26:05.021230 205345 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.crt ...
I1026 08:26:05.021246 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.crt: {Name:mkec713ba648357f7474b45d50652242df8ed842 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:05.021448 205345 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.key ...
I1026 08:26:05.021458 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/client.key: {Name:mk6319e8a9dd5bfe932256e07b8cee348af24c96 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:05.021552 205345 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key.f65a4a10
I1026 08:26:05.021565 205345 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt.f65a4a10 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1026 08:26:05.792103 205345 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt.f65a4a10 ...
I1026 08:26:05.792117 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt.f65a4a10: {Name:mke8d148da7fdef59a777400998a8014b21a7fa1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:05.792305 205345 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key.f65a4a10 ...
I1026 08:26:05.792313 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key.f65a4a10: {Name:mk31bea7d14a966f8f45af5b0402a0ced3d13700 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:05.792391 205345 certs.go:382] copying /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt.f65a4a10 -> /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt
I1026 08:26:05.792471 205345 certs.go:386] copying /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key.f65a4a10 -> /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key
I1026 08:26:05.792530 205345 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.key
I1026 08:26:05.792542 205345 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.crt with IP's: []
I1026 08:26:06.109928 205345 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.crt ...
I1026 08:26:06.109942 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.crt: {Name:mk8bb811161a026ea2867c36692fee165dd66ede Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:06.110141 205345 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.key ...
I1026 08:26:06.110151 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.key: {Name:mkbd4841d297ae11f4de50068da3a89be89515f2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:06.110324 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/4113.pem (1338 bytes)
W1026 08:26:06.110358 205345 certs.go:480] ignoring /home/jenkins/minikube-integration/21772-2316/.minikube/certs/4113_empty.pem, impossibly tiny 0 bytes
I1026 08:26:06.110365 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca-key.pem (1675 bytes)
I1026 08:26:06.110389 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/ca.pem (1082 bytes)
I1026 08:26:06.110411 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/cert.pem (1123 bytes)
I1026 08:26:06.110432 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/certs/key.pem (1679 bytes)
I1026 08:26:06.110472 205345 certs.go:484] found cert: /home/jenkins/minikube-integration/21772-2316/.minikube/files/etc/ssl/certs/41132.pem (1708 bytes)
I1026 08:26:06.111109 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1026 08:26:06.130924 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1026 08:26:06.152036 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1026 08:26:06.170930 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1026 08:26:06.190814 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1026 08:26:06.210740 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1026 08:26:06.229203 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1026 08:26:06.246880 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/profiles/scheduled-stop-995971/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1026 08:26:06.265426 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/files/etc/ssl/certs/41132.pem --> /usr/share/ca-certificates/41132.pem (1708 bytes)
I1026 08:26:06.284539 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1026 08:26:06.303974 205345 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21772-2316/.minikube/certs/4113.pem --> /usr/share/ca-certificates/4113.pem (1338 bytes)
I1026 08:26:06.322412 205345 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1026 08:26:06.338566 205345 ssh_runner.go:195] Run: openssl version
I1026 08:26:06.344959 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41132.pem && ln -fs /usr/share/ca-certificates/41132.pem /etc/ssl/certs/41132.pem"
I1026 08:26:06.354052 205345 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41132.pem
I1026 08:26:06.357890 205345 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Oct 26 07:54 /usr/share/ca-certificates/41132.pem
I1026 08:26:06.357967 205345 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41132.pem
I1026 08:26:06.400307 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41132.pem /etc/ssl/certs/3ec20f2e.0"
I1026 08:26:06.408658 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1026 08:26:06.416908 205345 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1026 08:26:06.420678 205345 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Oct 26 07:48 /usr/share/ca-certificates/minikubeCA.pem
I1026 08:26:06.420749 205345 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1026 08:26:06.461869 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1026 08:26:06.470378 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4113.pem && ln -fs /usr/share/ca-certificates/4113.pem /etc/ssl/certs/4113.pem"
I1026 08:26:06.478738 205345 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4113.pem
I1026 08:26:06.482421 205345 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Oct 26 07:54 /usr/share/ca-certificates/4113.pem
I1026 08:26:06.482480 205345 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4113.pem
I1026 08:26:06.524111 205345 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4113.pem /etc/ssl/certs/51391683.0"
I1026 08:26:06.532406 205345 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1026 08:26:06.536230 205345 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1026 08:26:06.536280 205345 kubeadm.go:400] StartCluster: {Name:scheduled-stop-995971 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-995971 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1026 08:26:06.536384 205345 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1026 08:26:06.555092 205345 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1026 08:26:06.563235 205345 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1026 08:26:06.571094 205345 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I1026 08:26:06.571149 205345 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1026 08:26:06.579707 205345 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1026 08:26:06.579717 205345 kubeadm.go:157] found existing configuration files:
I1026 08:26:06.579792 205345 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1026 08:26:06.588144 205345 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1026 08:26:06.588209 205345 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1026 08:26:06.595852 205345 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1026 08:26:06.603531 205345 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1026 08:26:06.603690 205345 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1026 08:26:06.611467 205345 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1026 08:26:06.620473 205345 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1026 08:26:06.620532 205345 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1026 08:26:06.628169 205345 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1026 08:26:06.636478 205345 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1026 08:26:06.636531 205345 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1026 08:26:06.644190 205345 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1026 08:26:06.691841 205345 kubeadm.go:318] [init] Using Kubernetes version: v1.34.1
I1026 08:26:06.691892 205345 kubeadm.go:318] [preflight] Running pre-flight checks
I1026 08:26:06.721246 205345 kubeadm.go:318] [preflight] The system verification failed. Printing the output from the verification:
I1026 08:26:06.721312 205345 kubeadm.go:318] KERNEL_VERSION: 5.15.0-1084-aws
I1026 08:26:06.721348 205345 kubeadm.go:318] OS: Linux
I1026 08:26:06.721394 205345 kubeadm.go:318] CGROUPS_CPU: enabled
I1026 08:26:06.721444 205345 kubeadm.go:318] CGROUPS_CPUACCT: enabled
I1026 08:26:06.721492 205345 kubeadm.go:318] CGROUPS_CPUSET: enabled
I1026 08:26:06.721541 205345 kubeadm.go:318] CGROUPS_DEVICES: enabled
I1026 08:26:06.721590 205345 kubeadm.go:318] CGROUPS_FREEZER: enabled
I1026 08:26:06.721639 205345 kubeadm.go:318] CGROUPS_MEMORY: enabled
I1026 08:26:06.721685 205345 kubeadm.go:318] CGROUPS_PIDS: enabled
I1026 08:26:06.721734 205345 kubeadm.go:318] CGROUPS_HUGETLB: enabled
I1026 08:26:06.721786 205345 kubeadm.go:318] CGROUPS_BLKIO: enabled
I1026 08:26:06.797791 205345 kubeadm.go:318] [preflight] Pulling images required for setting up a Kubernetes cluster
I1026 08:26:06.797916 205345 kubeadm.go:318] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1026 08:26:06.798043 205345 kubeadm.go:318] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1026 08:26:06.815278 205345 kubeadm.go:318] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1026 08:26:06.821899 205345 out.go:252] - Generating certificates and keys ...
I1026 08:26:06.822003 205345 kubeadm.go:318] [certs] Using existing ca certificate authority
I1026 08:26:06.822078 205345 kubeadm.go:318] [certs] Using existing apiserver certificate and key on disk
I1026 08:26:07.283709 205345 kubeadm.go:318] [certs] Generating "apiserver-kubelet-client" certificate and key
I1026 08:26:07.786685 205345 kubeadm.go:318] [certs] Generating "front-proxy-ca" certificate and key
I1026 08:26:08.340319 205345 kubeadm.go:318] [certs] Generating "front-proxy-client" certificate and key
I1026 08:26:09.059256 205345 kubeadm.go:318] [certs] Generating "etcd/ca" certificate and key
I1026 08:26:11.039879 205345 kubeadm.go:318] [certs] Generating "etcd/server" certificate and key
I1026 08:26:11.040055 205345 kubeadm.go:318] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-995971] and IPs [192.168.76.2 127.0.0.1 ::1]
I1026 08:26:11.124394 205345 kubeadm.go:318] [certs] Generating "etcd/peer" certificate and key
I1026 08:26:11.124687 205345 kubeadm.go:318] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-995971] and IPs [192.168.76.2 127.0.0.1 ::1]
I1026 08:26:11.265350 205345 kubeadm.go:318] [certs] Generating "etcd/healthcheck-client" certificate and key
I1026 08:26:12.082900 205345 kubeadm.go:318] [certs] Generating "apiserver-etcd-client" certificate and key
I1026 08:26:12.403685 205345 kubeadm.go:318] [certs] Generating "sa" key and public key
I1026 08:26:12.403965 205345 kubeadm.go:318] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1026 08:26:12.481294 205345 kubeadm.go:318] [kubeconfig] Writing "admin.conf" kubeconfig file
I1026 08:26:12.557284 205345 kubeadm.go:318] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1026 08:26:12.860311 205345 kubeadm.go:318] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1026 08:26:13.108881 205345 kubeadm.go:318] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1026 08:26:14.011167 205345 kubeadm.go:318] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1026 08:26:14.012011 205345 kubeadm.go:318] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1026 08:26:14.015473 205345 kubeadm.go:318] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1026 08:26:14.018979 205345 out.go:252] - Booting up control plane ...
I1026 08:26:14.019087 205345 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1026 08:26:14.019167 205345 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1026 08:26:14.019236 205345 kubeadm.go:318] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1026 08:26:14.047428 205345 kubeadm.go:318] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1026 08:26:14.047534 205345 kubeadm.go:318] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1026 08:26:14.056252 205345 kubeadm.go:318] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1026 08:26:14.056538 205345 kubeadm.go:318] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1026 08:26:14.056583 205345 kubeadm.go:318] [kubelet-start] Starting the kubelet
I1026 08:26:14.196633 205345 kubeadm.go:318] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1026 08:26:14.196763 205345 kubeadm.go:318] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1026 08:26:15.697480 205345 kubeadm.go:318] [kubelet-check] The kubelet is healthy after 1.50082906s
I1026 08:26:15.700895 205345 kubeadm.go:318] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1026 08:26:15.700984 205345 kubeadm.go:318] [control-plane-check] Checking kube-apiserver at https://192.168.76.2:8443/livez
I1026 08:26:15.701271 205345 kubeadm.go:318] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1026 08:26:15.701356 205345 kubeadm.go:318] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1026 08:26:19.504756 205345 kubeadm.go:318] [control-plane-check] kube-controller-manager is healthy after 3.803430747s
I1026 08:26:21.131347 205345 kubeadm.go:318] [control-plane-check] kube-scheduler is healthy after 5.430431964s
I1026 08:26:22.702522 205345 kubeadm.go:318] [control-plane-check] kube-apiserver is healthy after 7.00135896s
I1026 08:26:22.723180 205345 kubeadm.go:318] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1026 08:26:22.742606 205345 kubeadm.go:318] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1026 08:26:22.771473 205345 kubeadm.go:318] [upload-certs] Skipping phase. Please see --upload-certs
I1026 08:26:22.771706 205345 kubeadm.go:318] [mark-control-plane] Marking the node scheduled-stop-995971 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1026 08:26:22.791538 205345 kubeadm.go:318] [bootstrap-token] Using token: vi1o9b.d4r5io9u8bx55awv
I1026 08:26:22.794590 205345 out.go:252] - Configuring RBAC rules ...
I1026 08:26:22.794711 205345 kubeadm.go:318] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1026 08:26:22.801350 205345 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1026 08:26:22.812945 205345 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1026 08:26:22.820201 205345 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1026 08:26:22.827323 205345 kubeadm.go:318] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1026 08:26:22.832564 205345 kubeadm.go:318] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1026 08:26:23.110139 205345 kubeadm.go:318] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1026 08:26:23.545317 205345 kubeadm.go:318] [addons] Applied essential addon: CoreDNS
I1026 08:26:24.110247 205345 kubeadm.go:318] [addons] Applied essential addon: kube-proxy
I1026 08:26:24.111645 205345 kubeadm.go:318]
I1026 08:26:24.111717 205345 kubeadm.go:318] Your Kubernetes control-plane has initialized successfully!
I1026 08:26:24.111722 205345 kubeadm.go:318]
I1026 08:26:24.111801 205345 kubeadm.go:318] To start using your cluster, you need to run the following as a regular user:
I1026 08:26:24.111805 205345 kubeadm.go:318]
I1026 08:26:24.111831 205345 kubeadm.go:318] mkdir -p $HOME/.kube
I1026 08:26:24.111910 205345 kubeadm.go:318] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1026 08:26:24.111962 205345 kubeadm.go:318] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1026 08:26:24.111966 205345 kubeadm.go:318]
I1026 08:26:24.112021 205345 kubeadm.go:318] Alternatively, if you are the root user, you can run:
I1026 08:26:24.112025 205345 kubeadm.go:318]
I1026 08:26:24.112073 205345 kubeadm.go:318] export KUBECONFIG=/etc/kubernetes/admin.conf
I1026 08:26:24.112077 205345 kubeadm.go:318]
I1026 08:26:24.112130 205345 kubeadm.go:318] You should now deploy a pod network to the cluster.
I1026 08:26:24.112207 205345 kubeadm.go:318] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1026 08:26:24.112289 205345 kubeadm.go:318] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1026 08:26:24.112294 205345 kubeadm.go:318]
I1026 08:26:24.112381 205345 kubeadm.go:318] You can now join any number of control-plane nodes by copying certificate authorities
I1026 08:26:24.112460 205345 kubeadm.go:318] and service account keys on each node and then running the following as root:
I1026 08:26:24.112464 205345 kubeadm.go:318]
I1026 08:26:24.112550 205345 kubeadm.go:318] kubeadm join control-plane.minikube.internal:8443 --token vi1o9b.d4r5io9u8bx55awv \
I1026 08:26:24.112657 205345 kubeadm.go:318] --discovery-token-ca-cert-hash sha256:c9ecbd3bfc9d3a59b5c4e5ab1eb59ab1bdfb707b04f7b8d72b46b157105da92f \
I1026 08:26:24.112677 205345 kubeadm.go:318] --control-plane
I1026 08:26:24.112681 205345 kubeadm.go:318]
I1026 08:26:24.112778 205345 kubeadm.go:318] Then you can join any number of worker nodes by running the following on each as root:
I1026 08:26:24.112781 205345 kubeadm.go:318]
I1026 08:26:24.112865 205345 kubeadm.go:318] kubeadm join control-plane.minikube.internal:8443 --token vi1o9b.d4r5io9u8bx55awv \
I1026 08:26:24.112971 205345 kubeadm.go:318] --discovery-token-ca-cert-hash sha256:c9ecbd3bfc9d3a59b5c4e5ab1eb59ab1bdfb707b04f7b8d72b46b157105da92f
I1026 08:26:24.117227 205345 kubeadm.go:318] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I1026 08:26:24.117477 205345 kubeadm.go:318] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1026 08:26:24.117590 205345 kubeadm.go:318] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1026 08:26:24.117612 205345 cni.go:84] Creating CNI manager for ""
I1026 08:26:24.117625 205345 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1026 08:26:24.120928 205345 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1026 08:26:24.123893 205345 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1026 08:26:24.134470 205345 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1026 08:26:24.147347 205345 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1026 08:26:24.147477 205345 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-995971 minikube.k8s.io/updated_at=2025_10_26T08_26_24_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=7bff0055abe294a06ae9b3b2dd6f86bacf87f0d4 minikube.k8s.io/name=scheduled-stop-995971 minikube.k8s.io/primary=true
I1026 08:26:24.147485 205345 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1026 08:26:24.312419 205345 kubeadm.go:1113] duration metric: took 165.011036ms to wait for elevateKubeSystemPrivileges
I1026 08:26:24.312451 205345 ops.go:34] apiserver oom_adj: -16
I1026 08:26:24.312468 205345 kubeadm.go:402] duration metric: took 17.776192426s to StartCluster
I1026 08:26:24.312483 205345 settings.go:142] acquiring lock: {Name:mk58fb3aa49224132ac4928d71b05a02cb54d631 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:24.312539 205345 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21772-2316/kubeconfig
I1026 08:26:24.313208 205345 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21772-2316/kubeconfig: {Name:mk654af1762e3ce47a7229e102a16c97932e1884 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1026 08:26:24.313403 205345 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1026 08:26:24.313503 205345 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1026 08:26:24.313716 205345 config.go:182] Loaded profile config "scheduled-stop-995971": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1026 08:26:24.313749 205345 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1026 08:26:24.313809 205345 addons.go:69] Setting storage-provisioner=true in profile "scheduled-stop-995971"
I1026 08:26:24.313820 205345 addons.go:238] Setting addon storage-provisioner=true in "scheduled-stop-995971"
I1026 08:26:24.313839 205345 host.go:66] Checking if "scheduled-stop-995971" exists ...
I1026 08:26:24.314127 205345 addons.go:69] Setting default-storageclass=true in profile "scheduled-stop-995971"
I1026 08:26:24.314141 205345 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-995971"
I1026 08:26:24.314436 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:26:24.314444 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:26:24.319472 205345 out.go:179] * Verifying Kubernetes components...
I1026 08:26:24.323695 205345 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1026 08:26:24.349610 205345 addons.go:238] Setting addon default-storageclass=true in "scheduled-stop-995971"
I1026 08:26:24.349636 205345 host.go:66] Checking if "scheduled-stop-995971" exists ...
I1026 08:26:24.350042 205345 cli_runner.go:164] Run: docker container inspect scheduled-stop-995971 --format={{.State.Status}}
I1026 08:26:24.367130 205345 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1026 08:26:24.369980 205345 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1026 08:26:24.369991 205345 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1026 08:26:24.370056 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:24.395731 205345 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I1026 08:26:24.395743 205345 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1026 08:26:24.395807 205345 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-995971
I1026 08:26:24.421415 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:24.444763 205345 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32968 SSHKeyPath:/home/jenkins/minikube-integration/21772-2316/.minikube/machines/scheduled-stop-995971/id_rsa Username:docker}
I1026 08:26:24.543701 205345 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1026 08:26:24.648190 205345 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1026 08:26:24.688849 205345 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1026 08:26:24.729830 205345 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1026 08:26:25.067116 205345 start.go:976] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1026 08:26:25.068060 205345 api_server.go:52] waiting for apiserver process to appear ...
I1026 08:26:25.068111 205345 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1026 08:26:25.277424 205345 api_server.go:72] duration metric: took 963.997209ms to wait for apiserver process to appear ...
I1026 08:26:25.277435 205345 api_server.go:88] waiting for apiserver healthz status ...
I1026 08:26:25.277450 205345 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1026 08:26:25.280515 205345 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1026 08:26:25.283312 205345 addons.go:514] duration metric: took 969.545225ms for enable addons: enabled=[default-storageclass storage-provisioner]
I1026 08:26:25.287643 205345 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1026 08:26:25.289001 205345 api_server.go:141] control plane version: v1.34.1
I1026 08:26:25.289016 205345 api_server.go:131] duration metric: took 11.57667ms to wait for apiserver health ...
I1026 08:26:25.289031 205345 system_pods.go:43] waiting for kube-system pods to appear ...
I1026 08:26:25.296675 205345 system_pods.go:59] 5 kube-system pods found
I1026 08:26:25.296695 205345 system_pods.go:61] "etcd-scheduled-stop-995971" [7c2730fa-7621-46ea-aa05-5c7e45c0b028] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1026 08:26:25.296702 205345 system_pods.go:61] "kube-apiserver-scheduled-stop-995971" [c1573f23-ea58-41ec-8faa-46bfe00c4b5f] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1026 08:26:25.296710 205345 system_pods.go:61] "kube-controller-manager-scheduled-stop-995971" [f892a15e-5091-4834-98c5-66d163901cd8] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1026 08:26:25.296716 205345 system_pods.go:61] "kube-scheduler-scheduled-stop-995971" [468d5f3f-7ba8-47e1-a6af-a441c8feb16f] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1026 08:26:25.296721 205345 system_pods.go:61] "storage-provisioner" [ea5138b1-be06-44c3-9291-9605c9819030] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I1026 08:26:25.296727 205345 system_pods.go:74] duration metric: took 7.691024ms to wait for pod list to return data ...
I1026 08:26:25.296740 205345 kubeadm.go:586] duration metric: took 983.318529ms to wait for: map[apiserver:true system_pods:true]
I1026 08:26:25.296751 205345 node_conditions.go:102] verifying NodePressure condition ...
I1026 08:26:25.307869 205345 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1026 08:26:25.307889 205345 node_conditions.go:123] node cpu capacity is 2
I1026 08:26:25.307901 205345 node_conditions.go:105] duration metric: took 11.144809ms to run NodePressure ...
I1026 08:26:25.307913 205345 start.go:241] waiting for startup goroutines ...
I1026 08:26:25.572711 205345 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-995971" context rescaled to 1 replicas
I1026 08:26:25.572733 205345 start.go:246] waiting for cluster config update ...
I1026 08:26:25.572743 205345 start.go:255] writing updated cluster config ...
I1026 08:26:25.573049 205345 ssh_runner.go:195] Run: rm -f paused
I1026 08:26:25.639518 205345 start.go:624] kubectl: 1.33.2, cluster: 1.34.1 (minor skew: 1)
I1026 08:26:25.642849 205345 out.go:179] * Done! kubectl is now configured to use "scheduled-stop-995971" cluster and "default" namespace by default
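[editor's note] The api_server.go entries above (wait for the kube-apiserver process, then poll https://192.168.76.2:8443/healthz until it returns 200 "ok") follow a plain poll-until-healthy pattern. The standalone Go sketch below only illustrates that pattern; it is not minikube's implementation, and the 30s budget, 500ms poll interval, and insecure TLS config are assumptions made for the example.

// healthzwait.go: minimal sketch of polling an apiserver healthz endpoint,
// modeled loosely on the api_server.go log lines above. Illustration only.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func waitForHealthz(url string, timeout time.Duration) error {
	// The apiserver in this cluster serves a cert we have not loaded here,
	// so verification is skipped; a real client would trust the cluster CA.
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("%s returned %d: %s\n", url, resp.StatusCode, body)
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond) // assumed poll interval
	}
	return fmt.Errorf("timed out after %s waiting for %s", timeout, url)
}

func main() {
	// Node IP and port taken from the log above; the 30s budget is assumed.
	if err := waitForHealthz("https://192.168.76.2:8443/healthz", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}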
==> Docker <==
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.271323865Z" level=info msg="Loading containers: done."
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.282258147Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.282323703Z" level=info msg="Initializing buildkit"
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.300035650Z" level=info msg="Completed buildkit initialization"
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.308523654Z" level=info msg="Daemon has completed initialization"
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.308627084Z" level=info msg="API listen on /run/docker.sock"
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.308761233Z" level=info msg="API listen on /var/run/docker.sock"
Oct 26 08:26:03 scheduled-stop-995971 dockerd[1132]: time="2025-10-26T08:26:03.308875985Z" level=info msg="API listen on [::]:2376"
Oct 26 08:26:03 scheduled-stop-995971 systemd[1]: Started docker.service - Docker Application Container Engine.
Oct 26 08:26:03 scheduled-stop-995971 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine...
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Start docker client with request timeout 0s"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Hairpin mode is set to hairpin-veth"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Loaded network plugin cni"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Docker cri networking managed by network plugin cni"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Setting cgroupDriver cgroupfs"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Oct 26 08:26:03 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:03Z" level=info msg="Start cri-dockerd grpc backend"
Oct 26 08:26:03 scheduled-stop-995971 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine.
Oct 26 08:26:16 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/8f4aed817d1d0f044e3125ccbec7b20f60338f68b9a7c45faad60e46e5f62629/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Oct 26 08:26:16 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1727ccab5559255360d4526186c59155df9a058028dc7e21ef0f67f6aaa118ad/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Oct 26 08:26:16 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/58c9b5e94b8b362f6ab28b3283bfdb67ac3c7fb97a79cdb7da338eac716aa614/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Oct 26 08:26:16 scheduled-stop-995971 cri-dockerd[1435]: time="2025-10-26T08:26:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4e2632d9837ab4ec24169d45469599059455cddde48cddd95b7a28230f9979de/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                             NAMESPACE
c724406c1d17f   43911e833d64d   11 seconds ago   Running   kube-apiserver            0         4e2632d9837ab   kube-apiserver-scheduled-stop-995971            kube-system
81f1a82134566   a1894772a478e   11 seconds ago   Running   etcd                      0         58c9b5e94b8b3   etcd-scheduled-stop-995971                      kube-system
2363e0aaf874b   b5f57ec6b9867   11 seconds ago   Running   kube-scheduler            0         1727ccab55592   kube-scheduler-scheduled-stop-995971            kube-system
dd59d0032b447   7eb2c6ff0c5a7   11 seconds ago   Running   kube-controller-manager   0         8f4aed817d1d0   kube-controller-manager-scheduled-stop-995971   kube-system
==> describe nodes <==
Name: scheduled-stop-995971
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=scheduled-stop-995971
kubernetes.io/os=linux
minikube.k8s.io/commit=7bff0055abe294a06ae9b3b2dd6f86bacf87f0d4
minikube.k8s.io/name=scheduled-stop-995971
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_10_26T08_26_24_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 26 Oct 2025 08:26:20 +0000
Taints: node.kubernetes.io/not-ready:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: scheduled-stop-995971
AcquireTime: <unset>
RenewTime: Sun, 26 Oct 2025 08:26:23 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Sun, 26 Oct 2025 08:26:23 +0000   Sun, 26 Oct 2025 08:26:16 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Sun, 26 Oct 2025 08:26:23 +0000   Sun, 26 Oct 2025 08:26:16 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Sun, 26 Oct 2025 08:26:23 +0000   Sun, 26 Oct 2025 08:26:16 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            False    Sun, 26 Oct 2025 08:26:23 +0000   Sun, 26 Oct 2025 08:26:16 +0000   KubeletNotReady              container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
Addresses:
InternalIP: 192.168.76.2
Hostname: scheduled-stop-995971
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: ef38fbc8889a0e5f09e9dc0868f5cd19
System UUID: ee78af28-df22-47c4-a968-94ffddfae51a
Boot ID: 34b1f8dc-4372-49e9-9ea3-c9ec215e9cff
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
Non-terminated Pods: (4 in total)
Namespace     Name                                            CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                            ------------   ----------   ---------------   -------------   ---
kube-system   etcd-scheduled-stop-995971                      100m (5%)      0 (0%)       100Mi (1%)        0 (0%)          5s
kube-system   kube-apiserver-scheduled-stop-995971            250m (12%)     0 (0%)       0 (0%)            0 (0%)          4s
kube-system   kube-controller-manager-scheduled-stop-995971   200m (10%)     0 (0%)       0 (0%)            0 (0%)          5s
kube-system   kube-scheduler-scheduled-stop-995971            100m (5%)      0 (0%)       0 (0%)            0 (0%)          4s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 650m (32%)   0 (0%)
memory              100Mi (1%)   0 (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
hugepages-32Mi      0 (0%)       0 (0%)
hugepages-64Ki      0 (0%)       0 (0%)
Events:
Type      Reason                    Age   From      Message
----      ------                    ----  ----      -------
Normal    Starting                  4s    kubelet   Starting kubelet.
Warning   CgroupV1                  4s    kubelet   cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal    NodeAllocatableEnforced   4s    kubelet   Updated Node Allocatable limit across pods
Normal    NodeHasSufficientMemory   4s    kubelet   Node scheduled-stop-995971 status is now: NodeHasSufficientMemory
Normal    NodeHasNoDiskPressure     4s    kubelet   Node scheduled-stop-995971 status is now: NodeHasNoDiskPressure
Normal    NodeHasSufficientPID      4s    kubelet   Node scheduled-stop-995971 status is now: NodeHasSufficientPID
==> dmesg <==
[Oct26 07:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014216] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.506625] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.034855] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.750604] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.915942] kauditd_printk_skb: 36 callbacks suppressed
[Oct26 08:02] hrtimer: interrupt took 5373625 ns
==> etcd [81f1a8213456] <==
{"level":"warn","ts":"2025-10-26T08:26:19.094711Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52348","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.133165Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52360","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.154161Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52364","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.177776Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52372","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.207685Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52398","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.238051Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52420","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.264841Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52440","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.287388Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52464","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.375632Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52486","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.394450Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52492","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.439732Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52512","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.454416Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52524","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.488079Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52546","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.510249Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52576","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.526856Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52598","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.552482Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52622","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.568784Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52632","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.580934Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52646","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.621003Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52676","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.621074Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52662","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.641919Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52704","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.676380Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52720","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.694092Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52744","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.708159Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52768","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-10-26T08:26:19.809070Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:52772","server-name":"","error":"EOF"}
==> kernel <==
08:26:27 up 1:08, 0 user, load average: 2.92, 3.28, 3.45
Linux scheduled-stop-995971 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kube-apiserver [c724406c1d17] <==
I1026 08:26:20.852943 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer"
I1026 08:26:20.853100 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller"
I1026 08:26:20.854252 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I1026 08:26:20.854692 1 aggregator.go:171] initial CRD sync complete...
I1026 08:26:20.854829 1 autoregister_controller.go:144] Starting autoregister controller
I1026 08:26:20.854952 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1026 08:26:20.855041 1 cache.go:39] Caches are synced for autoregister controller
I1026 08:26:20.865691 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I1026 08:26:20.865785 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I1026 08:26:20.891965 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I1026 08:26:20.899372 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1026 08:26:20.899817 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
I1026 08:26:21.508876 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1026 08:26:21.514686 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1026 08:26:21.514714 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1026 08:26:22.274811 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1026 08:26:22.325013 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1026 08:26:22.477553 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1026 08:26:22.486162 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1026 08:26:22.487414 1 controller.go:667] quota admission added evaluator for: endpoints
I1026 08:26:22.493742 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1026 08:26:22.740668 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I1026 08:26:23.521948 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1026 08:26:23.543854 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1026 08:26:23.557321 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
==> kube-controller-manager [dd59d0032b44] <==
I1026 08:26:27.392048 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch"
I1026 08:26:27.392080 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resourceclaimtemplates.resource.k8s.io"
I1026 08:26:27.392098 1 shared_informer.go:682] "Warning: resync period is smaller than resync check period and the informer has already started. Changing it to the resync check period" resyncPeriod="17h7m27.153359314s" resyncCheckPeriod="21h20m27.224052319s"
I1026 08:26:27.392167 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts"
I1026 08:26:27.392197 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io"
I1026 08:26:27.392253 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io"
I1026 08:26:27.392285 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io"
I1026 08:26:27.392303 1 shared_informer.go:682] "Warning: resync period is smaller than resync check period and the informer has already started. Changing it to the resync check period" resyncPeriod="16h46m13.110333716s" resyncCheckPeriod="21h20m27.224052319s"
I1026 08:26:27.392337 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io"
I1026 08:26:27.392384 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps"
I1026 08:26:27.392411 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch"
I1026 08:26:27.392441 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io"
I1026 08:26:27.392474 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io"
I1026 08:26:27.392496 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints"
I1026 08:26:27.392540 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates"
I1026 08:26:27.392563 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy"
I1026 08:26:27.392577 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io"
I1026 08:26:27.392611 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps"
I1026 08:26:27.392634 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling"
I1026 08:26:27.392650 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges"
I1026 08:26:27.392671 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps"
I1026 08:26:27.392695 1 controllermanager.go:781] "Started controller" controller="resourcequota-controller"
I1026 08:26:27.392939 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller"
I1026 08:26:27.392954 1 shared_informer.go:349] "Waiting for caches to sync" controller="resource quota"
I1026 08:26:27.392971 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller"
==> kube-scheduler [2363e0aaf874] <==
I1026 08:26:21.113038 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1026 08:26:21.122025 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E1026 08:26:21.135851 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
E1026 08:26:21.136363 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1026 08:26:21.136429 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1026 08:26:21.136566 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1026 08:26:21.136667 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1026 08:26:21.136762 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1026 08:26:21.136956 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1026 08:26:21.137066 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1026 08:26:21.137155 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1026 08:26:21.137259 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1026 08:26:21.137563 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1026 08:26:21.138176 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1026 08:26:21.138355 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1026 08:26:21.138448 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1026 08:26:21.138585 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1026 08:26:21.138666 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1026 08:26:21.138682 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1026 08:26:21.138715 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1026 08:26:21.138103 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1026 08:26:21.949811 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1026 08:26:21.987861 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1026 08:26:22.233112 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
I1026 08:26:24.829647 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kubelet <==
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868896 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/eee0322e828c3cbcc7e11f13f19d106b-etcd-certs\") pod \"etcd-scheduled-stop-995971\" (UID: \"eee0322e828c3cbcc7e11f13f19d106b\") " pod="kube-system/etcd-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868915 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/8073743c50461e0d11985f3043a11e12-ca-certs\") pod \"kube-apiserver-scheduled-stop-995971\" (UID: \"8073743c50461e0d11985f3043a11e12\") " pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868938 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/8073743c50461e0d11985f3043a11e12-k8s-certs\") pod \"kube-apiserver-scheduled-stop-995971\" (UID: \"8073743c50461e0d11985f3043a11e12\") " pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868956 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/8073743c50461e0d11985f3043a11e12-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-995971\" (UID: \"8073743c50461e0d11985f3043a11e12\") " pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868974 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/67a3838caac338854c982fed3ac21aa7-etc-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-995971\" (UID: \"67a3838caac338854c982fed3ac21aa7\") " pod="kube-system/kube-controller-manager-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.868993 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/eee0322e828c3cbcc7e11f13f19d106b-etcd-data\") pod \"etcd-scheduled-stop-995971\" (UID: \"eee0322e828c3cbcc7e11f13f19d106b\") " pod="kube-system/etcd-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869010 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/8073743c50461e0d11985f3043a11e12-etc-ca-certificates\") pod \"kube-apiserver-scheduled-stop-995971\" (UID: \"8073743c50461e0d11985f3043a11e12\") " pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869038 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/8073743c50461e0d11985f3043a11e12-usr-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-995971\" (UID: \"8073743c50461e0d11985f3043a11e12\") " pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869058 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/67a3838caac338854c982fed3ac21aa7-ca-certs\") pod \"kube-controller-manager-scheduled-stop-995971\" (UID: \"67a3838caac338854c982fed3ac21aa7\") " pod="kube-system/kube-controller-manager-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869076 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/67a3838caac338854c982fed3ac21aa7-k8s-certs\") pod \"kube-controller-manager-scheduled-stop-995971\" (UID: \"67a3838caac338854c982fed3ac21aa7\") " pod="kube-system/kube-controller-manager-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869103 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/67a3838caac338854c982fed3ac21aa7-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-995971\" (UID: \"67a3838caac338854c982fed3ac21aa7\") " pod="kube-system/kube-controller-manager-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869126 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/67a3838caac338854c982fed3ac21aa7-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-995971\" (UID: \"67a3838caac338854c982fed3ac21aa7\") " pod="kube-system/kube-controller-manager-scheduled-stop-995971"
Oct 26 08:26:23 scheduled-stop-995971 kubelet[2298]: I1026 08:26:23.869144 2298 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/22c129a98c4c779ba9a8163f132b8e69-kubeconfig\") pod \"kube-scheduler-scheduled-stop-995971\" (UID: \"22c129a98c4c779ba9a8163f132b8e69\") " pod="kube-system/kube-scheduler-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.433542 2298 apiserver.go:52] "Watching apiserver"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.473791 2298 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.604306 2298 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.604600 2298 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.609093 2298 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: E1026 08:26:24.626902 2298 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-scheduled-stop-995971\" already exists" pod="kube-system/kube-apiserver-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: E1026 08:26:24.628213 2298 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-scheduled-stop-995971\" already exists" pod="kube-system/kube-scheduler-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: E1026 08:26:24.628879 2298 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-995971\" already exists" pod="kube-system/etcd-scheduled-stop-995971"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.666261 2298 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-995971" podStartSLOduration=2.666241393 podStartE2EDuration="2.666241393s" podCreationTimestamp="2025-10-26 08:26:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-26 08:26:24.65337002 +0000 UTC m=+1.298701351" watchObservedRunningTime="2025-10-26 08:26:24.666241393 +0000 UTC m=+1.311572706"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.680922 2298 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-995971" podStartSLOduration=1.680902621 podStartE2EDuration="1.680902621s" podCreationTimestamp="2025-10-26 08:26:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-26 08:26:24.666209098 +0000 UTC m=+1.311540429" watchObservedRunningTime="2025-10-26 08:26:24.680902621 +0000 UTC m=+1.326233935"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.695457 2298 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-995971" podStartSLOduration=1.695436987 podStartE2EDuration="1.695436987s" podCreationTimestamp="2025-10-26 08:26:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-26 08:26:24.681001547 +0000 UTC m=+1.326332869" watchObservedRunningTime="2025-10-26 08:26:24.695436987 +0000 UTC m=+1.340768317"
Oct 26 08:26:24 scheduled-stop-995971 kubelet[2298]: I1026 08:26:24.713243 2298 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-995971" podStartSLOduration=2.713228385 podStartE2EDuration="2.713228385s" podCreationTimestamp="2025-10-26 08:26:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-26 08:26:24.695840631 +0000 UTC m=+1.341171953" watchObservedRunningTime="2025-10-26 08:26:24.713228385 +0000 UTC m=+1.358559707"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p scheduled-stop-995971 -n scheduled-stop-995971
helpers_test.go:269: (dbg) Run: kubectl --context scheduled-stop-995971 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: storage-provisioner
helpers_test.go:282: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context scheduled-stop-995971 describe pod storage-provisioner
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context scheduled-stop-995971 describe pod storage-provisioner: exit status 1 (96.788399ms)
** stderr **
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:287: kubectl --context scheduled-stop-995971 describe pod storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "scheduled-stop-995971" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p scheduled-stop-995971
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-995971: (2.197356134s)
--- FAIL: TestScheduledStopUnix (41.21s)