=== RUN TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-arm64 start -p scheduled-stop-826255 --memory=3072 --driver=docker --container-runtime=docker
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-826255 --memory=3072 --driver=docker --container-runtime=docker: (33.639712337s)
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-826255 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-826255 -n scheduled-stop-826255
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-826255 --schedule 15s
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:98: process 638510 running but should have been killed on reschedule of stop
panic.go:636: *** TestScheduledStopUnix FAILED at 2025-11-01 10:36:13.20794453 +0000 UTC m=+2432.272605839
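The "signal error was: <nil>" lines above come from the test probing the scheduled-stop daemon's PID: on Unix, sending signal 0 performs only an existence/permission check without delivering a signal, so a nil error means the process is alive. The failure is that PID 638510 was still alive after the stop was rescheduled. A minimal Go sketch of that liveness idiom (the function name is illustrative, not the test's actual helper):

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    )

    // processAlive reports whether pid refers to a live process by sending
    // signal 0. On Unix, os.FindProcess always succeeds, so the Signal call
    // is what actually probes the process.
    func processAlive(pid int) bool {
    	p, err := os.FindProcess(pid)
    	if err != nil {
    		return false
    	}
    	return p.Signal(syscall.Signal(0)) == nil
    }

    func main() {
    	// In the failing assertion, the *old* daemon should be gone after a
    	// reschedule; a nil signal error (alive) is exactly the bug reported.
    	fmt.Println(processAlive(638510))
    }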
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestScheduledStopUnix]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect scheduled-stop-826255
helpers_test.go:243: (dbg) docker inspect scheduled-stop-826255:
-- stdout --
[
{
"Id": "740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f",
"Created": "2025-11-01T10:35:44.112603592Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 635751,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-01T10:35:44.209048177Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:1b8004df0b408966a254b2ecd4551aa85aaac4627e7e9cb1cefc14dfe51ec273",
"ResolvConfPath": "/var/lib/docker/containers/740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f/hostname",
"HostsPath": "/var/lib/docker/containers/740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f/hosts",
"LogPath": "/var/lib/docker/containers/740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f/740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f-json.log",
"Name": "/scheduled-stop-826255",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"scheduled-stop-826255:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "scheduled-stop-826255",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "740b8b2fde3be544693833dcded4cc3c0408986c51c980185f9abfc198b4f22f",
"LowerDir": "/var/lib/docker/overlay2/41d1e2279e644edab86ec4fc91410e8ae35c80928b7461551f5a611ab6191807-init/diff:/var/lib/docker/overlay2/3062594f33e84d9f57208883f6bde8c2790fe1978f163d6263eb71a492711c10/diff",
"MergedDir": "/var/lib/docker/overlay2/41d1e2279e644edab86ec4fc91410e8ae35c80928b7461551f5a611ab6191807/merged",
"UpperDir": "/var/lib/docker/overlay2/41d1e2279e644edab86ec4fc91410e8ae35c80928b7461551f5a611ab6191807/diff",
"WorkDir": "/var/lib/docker/overlay2/41d1e2279e644edab86ec4fc91410e8ae35c80928b7461551f5a611ab6191807/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "scheduled-stop-826255",
"Source": "/var/lib/docker/volumes/scheduled-stop-826255/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "scheduled-stop-826255",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "scheduled-stop-826255",
"name.minikube.sigs.k8s.io": "scheduled-stop-826255",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "f9c732e8b7880dc2e4d80b49f6430415d70380e626c3f56357b5816890704594",
"SandboxKey": "/var/run/docker/netns/f9c732e8b788",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33363"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33364"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33367"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33365"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33366"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"scheduled-stop-826255": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "36:c6:db:8f:4b:aa",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "bd3a93d94334caa3ac73d404263ed0e90700bcfdf47ec60afef7f1d00fe6f956",
"EndpointID": "7b18649b39b761d54331f51cd26a67ae48ceb1f9ad7261486db518df4f86af70",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"scheduled-stop-826255",
"740b8b2fde3b"
]
}
}
}
}
]
-- /stdout --
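The inspect dump above is a plain JSON array with one object per container, so a post-mortem check can decode just the fields it needs rather than the whole document. A minimal sketch, assuming only that "docker inspect" is on PATH (the struct and field selection are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os/exec"
    )

    // container models only the inspect fields we care about; unknown
    // fields in the JSON are ignored by encoding/json.
    type container struct {
    	Id    string
    	State struct {
    		Status string
    		Pid    int
    	}
    }

    func main() {
    	out, err := exec.Command("docker", "inspect", "scheduled-stop-826255").Output()
    	if err != nil {
    		panic(err)
    	}
    	var cs []container
    	if err := json.Unmarshal(out, &cs); err != nil {
    		panic(err)
    	}
    	for _, c := range cs {
    		fmt.Printf("%s: %s (pid %d)\n", c.Id[:12], c.State.Status, c.State.Pid)
    	}
    }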
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-826255 -n scheduled-stop-826255
helpers_test.go:252: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestScheduledStopUnix]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p scheduled-stop-826255 logs -n 25
helpers_test.go:260: TestScheduledStopUnix logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ stop │ -p multinode-751402 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:29 UTC │ 01 Nov 25 10:30 UTC │
│ start │ -p multinode-751402 --wait=true -v=5 --alsologtostderr │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:30 UTC │ 01 Nov 25 10:31 UTC │
│ node │ list -p multinode-751402 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:31 UTC │ │
│ node │ multinode-751402 node delete m03 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:31 UTC │ 01 Nov 25 10:31 UTC │
│ stop │ multinode-751402 stop │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:31 UTC │ 01 Nov 25 10:31 UTC │
│ start │ -p multinode-751402 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=docker │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:31 UTC │ 01 Nov 25 10:32 UTC │
│ node │ list -p multinode-751402 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:32 UTC │ │
│ start │ -p multinode-751402-m02 --driver=docker --container-runtime=docker │ multinode-751402-m02 │ jenkins │ v1.37.0 │ 01 Nov 25 10:32 UTC │ │
│ start │ -p multinode-751402-m03 --driver=docker --container-runtime=docker │ multinode-751402-m03 │ jenkins │ v1.37.0 │ 01 Nov 25 10:32 UTC │ 01 Nov 25 10:33 UTC │
│ node │ add -p multinode-751402 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:33 UTC │ │
│ delete │ -p multinode-751402-m03 │ multinode-751402-m03 │ jenkins │ v1.37.0 │ 01 Nov 25 10:33 UTC │ 01 Nov 25 10:33 UTC │
│ delete │ -p multinode-751402 │ multinode-751402 │ jenkins │ v1.37.0 │ 01 Nov 25 10:33 UTC │ 01 Nov 25 10:33 UTC │
│ start │ -p test-preload-750101 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0 │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:33 UTC │ 01 Nov 25 10:34 UTC │
│ image │ test-preload-750101 image pull gcr.io/k8s-minikube/busybox │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:34 UTC │ 01 Nov 25 10:34 UTC │
│ stop │ -p test-preload-750101 │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:34 UTC │ 01 Nov 25 10:34 UTC │
│ start │ -p test-preload-750101 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:34 UTC │ 01 Nov 25 10:35 UTC │
│ image │ test-preload-750101 image list │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:35 UTC │ 01 Nov 25 10:35 UTC │
│ delete │ -p test-preload-750101 │ test-preload-750101 │ jenkins │ v1.37.0 │ 01 Nov 25 10:35 UTC │ 01 Nov 25 10:35 UTC │
│ start │ -p scheduled-stop-826255 --memory=3072 --driver=docker --container-runtime=docker │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:35 UTC │ 01 Nov 25 10:36 UTC │
│ stop │ -p scheduled-stop-826255 --schedule 5m │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
│ stop │ -p scheduled-stop-826255 --schedule 5m │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
│ stop │ -p scheduled-stop-826255 --schedule 5m │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
│ stop │ -p scheduled-stop-826255 --schedule 15s │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
│ stop │ -p scheduled-stop-826255 --schedule 15s │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
│ stop │ -p scheduled-stop-826255 --schedule 15s │ scheduled-stop-826255 │ jenkins │ v1.37.0 │ 01 Nov 25 10:36 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
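The audit trail shows "stop --schedule" being reissued repeatedly (three times at 5m, then three at 15s). The contract the failing assertion relies on is that each reschedule first kills the previously daemonized stop process before recording a new one. A rough sketch of that contract, with a hypothetical pid-file path and helper name (not minikube's actual implementation):

    package main

    import (
    	"fmt"
    	"os"
    	"strconv"
    	"syscall"
    )

    // reschedule kills any previously recorded scheduled-stop process, then
    // records the new daemon's pid. The pid file path is illustrative.
    func reschedule(pidFile string, newPid int) error {
    	if b, err := os.ReadFile(pidFile); err == nil {
    		if old, err := strconv.Atoi(string(b)); err == nil {
    			// Ignore the error: the old daemon may already be gone.
    			_ = syscall.Kill(old, syscall.SIGKILL)
    		}
    	}
    	return os.WriteFile(pidFile, []byte(strconv.Itoa(newPid)), 0o600)
    }

    func main() {
    	fmt.Println(reschedule("/tmp/scheduled-stop.pid", os.Getpid()))
    }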
==> Last Start <==
Log file created at: 2025/11/01 10:35:39
Running on machine: ip-172-31-30-239
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1101 10:35:39.077979 635360 out.go:360] Setting OutFile to fd 1 ...
I1101 10:35:39.078091 635360 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1101 10:35:39.078095 635360 out.go:374] Setting ErrFile to fd 2...
I1101 10:35:39.078099 635360 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1101 10:35:39.078369 635360 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21830-429697/.minikube/bin
I1101 10:35:39.078772 635360 out.go:368] Setting JSON to false
I1101 10:35:39.079631 635360 start.go:133] hostinfo: {"hostname":"ip-172-31-30-239","uptime":8291,"bootTime":1761985048,"procs":155,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
I1101 10:35:39.079691 635360 start.go:143] virtualization:
I1101 10:35:39.085705 635360 out.go:179] * [scheduled-stop-826255] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1101 10:35:39.089309 635360 notify.go:221] Checking for updates...
I1101 10:35:39.092683 635360 out.go:179] - MINIKUBE_LOCATION=21830
I1101 10:35:39.096029 635360 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1101 10:35:39.099155 635360 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21830-429697/kubeconfig
I1101 10:35:39.102323 635360 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21830-429697/.minikube
I1101 10:35:39.105463 635360 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1101 10:35:39.108646 635360 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1101 10:35:39.111930 635360 driver.go:422] Setting default libvirt URI to qemu:///system
I1101 10:35:39.147337 635360 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1101 10:35:39.147441 635360 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1101 10:35:39.207967 635360 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-11-01 10:35:39.198718898 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1101 10:35:39.208065 635360 docker.go:319] overlay module found
I1101 10:35:39.211420 635360 out.go:179] * Using the docker driver based on user configuration
I1101 10:35:39.214524 635360 start.go:309] selected driver: docker
I1101 10:35:39.214535 635360 start.go:930] validating driver "docker" against <nil>
I1101 10:35:39.214546 635360 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1101 10:35:39.215293 635360 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1101 10:35:39.263710 635360 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-11-01 10:35:39.254542873 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1101 10:35:39.263852 635360 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1101 10:35:39.264061 635360 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
I1101 10:35:39.266928 635360 out.go:179] * Using Docker driver with root privileges
I1101 10:35:39.269765 635360 cni.go:84] Creating CNI manager for ""
I1101 10:35:39.269828 635360 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1101 10:35:39.269838 635360 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I1101 10:35:39.269923 635360 start.go:353] cluster config:
{Name:scheduled-stop-826255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-826255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1101 10:35:39.274974 635360 out.go:179] * Starting "scheduled-stop-826255" primary control-plane node in "scheduled-stop-826255" cluster
I1101 10:35:39.277905 635360 cache.go:124] Beginning downloading kic base image for docker with docker
I1101 10:35:39.281017 635360 out.go:179] * Pulling base image v0.0.48-1760939008-21773 ...
I1101 10:35:39.283909 635360 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1101 10:35:39.283911 635360 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon
I1101 10:35:39.283965 635360 preload.go:198] Found local preload: /home/jenkins/minikube-integration/21830-429697/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4
I1101 10:35:39.283974 635360 cache.go:59] Caching tarball of preloaded images
I1101 10:35:39.284058 635360 preload.go:233] Found /home/jenkins/minikube-integration/21830-429697/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1101 10:35:39.284067 635360 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1101 10:35:39.284396 635360 profile.go:143] Saving config to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/config.json ...
I1101 10:35:39.284414 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/config.json: {Name:mk5f240ce16e3b4aedd972547a8293807f53cc90 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:39.303171 635360 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon, skipping pull
I1101 10:35:39.303183 635360 cache.go:148] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 exists in daemon, skipping load
I1101 10:35:39.303195 635360 cache.go:233] Successfully downloaded all kic artifacts
I1101 10:35:39.303217 635360 start.go:360] acquireMachinesLock for scheduled-stop-826255: {Name:mk0210de6bead4c3d66f3d13bd2cebeff93f6255 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1101 10:35:39.303316 635360 start.go:364] duration metric: took 84.719µs to acquireMachinesLock for "scheduled-stop-826255"
I1101 10:35:39.303339 635360 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-826255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-826255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1101 10:35:39.303409 635360 start.go:125] createHost starting for "" (driver="docker")
I1101 10:35:39.307141 635360 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1101 10:35:39.307380 635360 start.go:159] libmachine.API.Create for "scheduled-stop-826255" (driver="docker")
I1101 10:35:39.307414 635360 client.go:173] LocalClient.Create starting
I1101 10:35:39.307523 635360 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem
I1101 10:35:39.307558 635360 main.go:143] libmachine: Decoding PEM data...
I1101 10:35:39.307570 635360 main.go:143] libmachine: Parsing certificate...
I1101 10:35:39.307631 635360 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21830-429697/.minikube/certs/cert.pem
I1101 10:35:39.307647 635360 main.go:143] libmachine: Decoding PEM data...
I1101 10:35:39.307655 635360 main.go:143] libmachine: Parsing certificate...
I1101 10:35:39.308048 635360 cli_runner.go:164] Run: docker network inspect scheduled-stop-826255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1101 10:35:39.323966 635360 cli_runner.go:211] docker network inspect scheduled-stop-826255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1101 10:35:39.324037 635360 network_create.go:284] running [docker network inspect scheduled-stop-826255] to gather additional debugging logs...
I1101 10:35:39.324053 635360 cli_runner.go:164] Run: docker network inspect scheduled-stop-826255
W1101 10:35:39.340273 635360 cli_runner.go:211] docker network inspect scheduled-stop-826255 returned with exit code 1
I1101 10:35:39.340292 635360 network_create.go:287] error running [docker network inspect scheduled-stop-826255]: docker network inspect scheduled-stop-826255: exit status 1
stdout:
[]
stderr:
Error response from daemon: network scheduled-stop-826255 not found
I1101 10:35:39.340311 635360 network_create.go:289] output of [docker network inspect scheduled-stop-826255]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network scheduled-stop-826255 not found
** /stderr **
I1101 10:35:39.340438 635360 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1101 10:35:39.357450 635360 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-38e2a87f6e1a IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:6e:fc:d8:1b:73:6f} reservation:<nil>}
I1101 10:35:39.357689 635360 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-8cf22fa5eddc IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:42:b1:95:81:6c:c6} reservation:<nil>}
I1101 10:35:39.357896 635360 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-6fa217da0172 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:ca:47:92:1f:18:de} reservation:<nil>}
I1101 10:35:39.358237 635360 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40019dcb50}
I1101 10:35:39.358253 635360 network_create.go:124] attempt to create docker network scheduled-stop-826255 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1101 10:35:39.358314 635360 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-826255 scheduled-stop-826255
I1101 10:35:39.412564 635360 network_create.go:108] docker network scheduled-stop-826255 192.168.76.0/24 created
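The three "skipping subnet ... that is taken" lines followed by "using free private subnet 192.168.76.0/24" show the candidate scan: /24 subnets are tried in order until one is not already claimed by an existing bridge. A simplified Go sketch of that scan (the step of 9 mirrors the 49 -> 58 -> 67 -> 76 sequence in this log; the taken set is illustrative):

    package main

    import "fmt"

    // firstFreeSubnet walks candidate /24 subnets, starting at
    // 192.168.49.0 and stepping by 9, returning the first one that no
    // existing bridge network has claimed.
    func firstFreeSubnet(taken map[string]bool) string {
    	for third := 49; third < 255; third += 9 {
    		s := fmt.Sprintf("192.168.%d.0/24", third)
    		if !taken[s] {
    			return s
    		}
    	}
    	return ""
    }

    func main() {
    	taken := map[string]bool{
    		"192.168.49.0/24": true, // br-38e2a87f6e1a
    		"192.168.58.0/24": true, // br-8cf22fa5eddc
    		"192.168.67.0/24": true, // br-6fa217da0172
    	}
    	fmt.Println(firstFreeSubnet(taken)) // 192.168.76.0/24
    }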
I1101 10:35:39.412583 635360 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-826255" container
I1101 10:35:39.412694 635360 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1101 10:35:39.428843 635360 cli_runner.go:164] Run: docker volume create scheduled-stop-826255 --label name.minikube.sigs.k8s.io=scheduled-stop-826255 --label created_by.minikube.sigs.k8s.io=true
I1101 10:35:39.446570 635360 oci.go:103] Successfully created a docker volume scheduled-stop-826255
I1101 10:35:39.446662 635360 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-826255-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-826255 --entrypoint /usr/bin/test -v scheduled-stop-826255:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -d /var/lib
I1101 10:35:39.977740 635360 oci.go:107] Successfully prepared a docker volume scheduled-stop-826255
I1101 10:35:39.977783 635360 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1101 10:35:39.977802 635360 kic.go:194] Starting extracting preloaded images to volume ...
I1101 10:35:39.977868 635360 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21830-429697/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-826255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir
I1101 10:35:44.027435 635360 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21830-429697/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-826255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir: (4.0495156s)
I1101 10:35:44.027460 635360 kic.go:203] duration metric: took 4.049654826s to extract preloaded images to volume ...
W1101 10:35:44.027632 635360 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1101 10:35:44.027737 635360 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1101 10:35:44.091947 635360 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-826255 --name scheduled-stop-826255 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-826255 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-826255 --network scheduled-stop-826255 --ip 192.168.76.2 --volume scheduled-stop-826255:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8
I1101 10:35:44.421193 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Running}}
I1101 10:35:44.445371 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:35:44.469928 635360 cli_runner.go:164] Run: docker exec scheduled-stop-826255 stat /var/lib/dpkg/alternatives/iptables
I1101 10:35:44.520401 635360 oci.go:144] the created container "scheduled-stop-826255" has a running status.
I1101 10:35:44.520421 635360 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa...
I1101 10:35:45.099614 635360 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1101 10:35:45.123804 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:35:45.149115 635360 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1101 10:35:45.149129 635360 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-826255 chown docker:docker /home/docker/.ssh/authorized_keys]
I1101 10:35:45.206309 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:35:45.229656 635360 machine.go:94] provisionDockerMachine start ...
I1101 10:35:45.229772 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:45.251931 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:45.252292 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:45.252302 635360 main.go:143] libmachine: About to run SSH command:
hostname
I1101 10:35:45.253072 635360 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:55236->127.0.0.1:33363: read: connection reset by peer
I1101 10:35:48.402894 635360 main.go:143] libmachine: SSH cmd err, output: <nil>: scheduled-stop-826255
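The "Error dialing TCP ... connection reset by peer" line followed a few seconds later by a successful SSH command is the normal retry-until-sshd-is-ready pattern: the first dial lands while sshd inside the container is still starting. A simplified TCP-level stand-in for that loop (the real provisioner retries the full SSH handshake; address and timings here are illustrative):

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    // waitForSSH retries a TCP dial until the port accepts a connection
    // or the attempt budget runs out.
    func waitForSSH(addr string, attempts int) error {
    	var err error
    	for i := 0; i < attempts; i++ {
    		var c net.Conn
    		if c, err = net.DialTimeout("tcp", addr, 2*time.Second); err == nil {
    			c.Close()
    			return nil
    		}
    		time.Sleep(time.Second)
    	}
    	return err
    }

    func main() {
    	fmt.Println(waitForSSH("127.0.0.1:33363", 10))
    }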
I1101 10:35:48.402908 635360 ubuntu.go:182] provisioning hostname "scheduled-stop-826255"
I1101 10:35:48.402972 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:48.420723 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:48.421033 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:48.421042 635360 main.go:143] libmachine: About to run SSH command:
sudo hostname scheduled-stop-826255 && echo "scheduled-stop-826255" | sudo tee /etc/hostname
I1101 10:35:48.576960 635360 main.go:143] libmachine: SSH cmd err, output: <nil>: scheduled-stop-826255
I1101 10:35:48.577029 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:48.594669 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:48.594980 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:48.595002 635360 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sscheduled-stop-826255' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-826255/g' /etc/hosts;
else
echo '127.0.1.1 scheduled-stop-826255' | sudo tee -a /etc/hosts;
fi
fi
I1101 10:35:48.743588 635360 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1101 10:35:48.743605 635360 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21830-429697/.minikube CaCertPath:/home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21830-429697/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21830-429697/.minikube}
I1101 10:35:48.743632 635360 ubuntu.go:190] setting up certificates
I1101 10:35:48.743640 635360 provision.go:84] configureAuth start
I1101 10:35:48.743706 635360 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-826255
I1101 10:35:48.759819 635360 provision.go:143] copyHostCerts
I1101 10:35:48.759874 635360 exec_runner.go:144] found /home/jenkins/minikube-integration/21830-429697/.minikube/ca.pem, removing ...
I1101 10:35:48.759882 635360 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21830-429697/.minikube/ca.pem
I1101 10:35:48.759960 635360 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21830-429697/.minikube/ca.pem (1078 bytes)
I1101 10:35:48.760057 635360 exec_runner.go:144] found /home/jenkins/minikube-integration/21830-429697/.minikube/cert.pem, removing ...
I1101 10:35:48.760061 635360 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21830-429697/.minikube/cert.pem
I1101 10:35:48.760085 635360 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21830-429697/.minikube/cert.pem (1123 bytes)
I1101 10:35:48.760141 635360 exec_runner.go:144] found /home/jenkins/minikube-integration/21830-429697/.minikube/key.pem, removing ...
I1101 10:35:48.760145 635360 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21830-429697/.minikube/key.pem
I1101 10:35:48.760165 635360 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21830-429697/.minikube/key.pem (1675 bytes)
I1101 10:35:48.760216 635360 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21830-429697/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca-key.pem org=jenkins.scheduled-stop-826255 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-826255]
I1101 10:35:49.285858 635360 provision.go:177] copyRemoteCerts
I1101 10:35:49.285917 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1101 10:35:49.285960 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:49.303741 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:35:49.406967 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1101 10:35:49.423606 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/machines/server.pem --> /etc/docker/server.pem (1229 bytes)
I1101 10:35:49.440624 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1101 10:35:49.457658 635360 provision.go:87] duration metric: took 713.997221ms to configureAuth
I1101 10:35:49.457675 635360 ubuntu.go:206] setting minikube options for container-runtime
I1101 10:35:49.457863 635360 config.go:182] Loaded profile config "scheduled-stop-826255": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1101 10:35:49.457919 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:49.475653 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:49.475967 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:49.475975 635360 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1101 10:35:49.624125 635360 main.go:143] libmachine: SSH cmd err, output: <nil>: overlay
I1101 10:35:49.624135 635360 ubuntu.go:71] root file system type: overlay
I1101 10:35:49.624241 635360 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1101 10:35:49.624302 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:49.642484 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:49.642784 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:49.642862 635360 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1101 10:35:49.800890 635360 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1101 10:35:49.800961 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:49.818953 635360 main.go:143] libmachine: Using SSH client type: native
I1101 10:35:49.819246 635360 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef110] 0x3f18d0 <nil> [] 0s} 127.0.0.1 33363 <nil> <nil>}
I1101 10:35:49.819261 635360 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1101 10:35:50.742102 635360 main.go:143] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-10-08 12:17:10.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-11-01 10:35:49.792983182 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I1101 10:35:50.742119 635360 machine.go:97] duration metric: took 5.512449725s to provisionDockerMachine
I1101 10:35:50.742129 635360 client.go:176] duration metric: took 11.434709258s to LocalClient.Create
I1101 10:35:50.742150 635360 start.go:167] duration metric: took 11.434771116s to libmachine.API.Create "scheduled-stop-826255"
I1101 10:35:50.742156 635360 start.go:293] postStartSetup for "scheduled-stop-826255" (driver="docker")
I1101 10:35:50.742165 635360 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1101 10:35:50.742224 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1101 10:35:50.742267 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:50.761263 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:35:50.863693 635360 ssh_runner.go:195] Run: cat /etc/os-release
I1101 10:35:50.866958 635360 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1101 10:35:50.866976 635360 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1101 10:35:50.866986 635360 filesync.go:126] Scanning /home/jenkins/minikube-integration/21830-429697/.minikube/addons for local assets ...
I1101 10:35:50.867039 635360 filesync.go:126] Scanning /home/jenkins/minikube-integration/21830-429697/.minikube/files for local assets ...
I1101 10:35:50.867112 635360 filesync.go:149] local asset: /home/jenkins/minikube-integration/21830-429697/.minikube/files/etc/ssl/certs/4315382.pem -> 4315382.pem in /etc/ssl/certs
I1101 10:35:50.867218 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1101 10:35:50.874613 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/files/etc/ssl/certs/4315382.pem --> /etc/ssl/certs/4315382.pem (1708 bytes)
I1101 10:35:50.892811 635360 start.go:296] duration metric: took 150.641487ms for postStartSetup
I1101 10:35:50.893174 635360 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-826255
I1101 10:35:50.910867 635360 profile.go:143] Saving config to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/config.json ...
I1101 10:35:50.911136 635360 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1101 10:35:50.911173 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:50.928368 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:35:51.028700 635360 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1101 10:35:51.033740 635360 start.go:128] duration metric: took 11.730317652s to createHost
I1101 10:35:51.033755 635360 start.go:83] releasing machines lock for "scheduled-stop-826255", held for 11.730432985s
I1101 10:35:51.033847 635360 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-826255
I1101 10:35:51.050810 635360 ssh_runner.go:195] Run: cat /version.json
I1101 10:35:51.050848 635360 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1101 10:35:51.050853 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:51.050905 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:35:51.069911 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:35:51.085163 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:35:51.171128 635360 ssh_runner.go:195] Run: systemctl --version
I1101 10:35:51.265174 635360 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1101 10:35:51.269382 635360 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1101 10:35:51.269464 635360 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1101 10:35:51.296522 635360 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1101 10:35:51.296538 635360 start.go:496] detecting cgroup driver to use...
I1101 10:35:51.296568 635360 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1101 10:35:51.296669 635360 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1101 10:35:51.310601 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1101 10:35:51.319139 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1101 10:35:51.328553 635360 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1101 10:35:51.328619 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1101 10:35:51.337293 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1101 10:35:51.346140 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1101 10:35:51.355081 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1101 10:35:51.364109 635360 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1101 10:35:51.371887 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1101 10:35:51.381019 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1101 10:35:51.389476 635360 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
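[editor's note] Taken together, the sed edits above pin the CRI sandbox image, force SystemdCgroup = false (matching the detected "cgroupfs" driver), migrate runc.v1/runtime.v1.linux names to io.containerd.runc.v2, point conf_dir at /etc/cni/net.d, and re-enable unprivileged ports. A quick way to confirm the result on the node (a sketch; the key names are the ones the sed expressions target):

  sudo grep -nE 'SystemdCgroup|sandbox_image|restrict_oom_score_adj|conf_dir|enable_unprivileged_ports' \
    /etc/containerd/config.toml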
I1101 10:35:51.398006 635360 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1101 10:35:51.405363 635360 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1101 10:35:51.412480 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:35:51.525656 635360 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1101 10:35:51.627061 635360 start.go:496] detecting cgroup driver to use...
I1101 10:35:51.627099 635360 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1101 10:35:51.627148 635360 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1101 10:35:51.642262 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1101 10:35:51.657137 635360 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1101 10:35:51.685487 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1101 10:35:51.698529 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1101 10:35:51.713053 635360 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1101 10:35:51.730478 635360 ssh_runner.go:195] Run: which cri-dockerd
I1101 10:35:51.735423 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1101 10:35:51.743293 635360 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1101 10:35:51.756730 635360 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1101 10:35:51.889489 635360 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1101 10:35:52.010411 635360 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1101 10:35:52.010529 635360 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1101 10:35:52.026479 635360 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1101 10:35:52.041336 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:35:52.158376 635360 ssh_runner.go:195] Run: sudo systemctl restart docker
I1101 10:35:52.533829 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1101 10:35:52.547077 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1101 10:35:52.560634 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1101 10:35:52.574452 635360 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1101 10:35:52.702303 635360 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1101 10:35:52.822962 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:35:52.944808 635360 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1101 10:35:52.960721 635360 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1101 10:35:52.973868 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:35:53.099334 635360 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1101 10:35:53.176663 635360 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1101 10:35:53.195274 635360 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1101 10:35:53.195344 635360 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1101 10:35:53.199141 635360 start.go:564] Will wait 60s for crictl version
I1101 10:35:53.199196 635360 ssh_runner.go:195] Run: which crictl
I1101 10:35:53.202697 635360 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1101 10:35:53.227106 635360 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1101 10:35:53.227165 635360 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1101 10:35:53.249455 635360 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1101 10:35:53.277400 635360 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1101 10:35:53.277491 635360 cli_runner.go:164] Run: docker network inspect scheduled-stop-826255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1101 10:35:53.293590 635360 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1101 10:35:53.297577 635360 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
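[editor's note] The one-liner above is minikube's idempotent hosts-file update: strip any stale tab-anchored host.minikube.internal entry, append the fresh record, and sudo-copy the temp file back so the shell redirection itself never needs root. Generalized (a sketch; NAME/IP are placeholders, and the tab separator in printf is an assumption so the grep anchor matches on a rerun; the captured log may render it as a space):

  NAME=host.minikube.internal
  IP=192.168.76.1
  { grep -v $'\t'"$NAME"'$' /etc/hosts; printf '%s\t%s\n' "$IP" "$NAME"; } > /tmp/h.$$
  sudo cp /tmp/h.$$ /etc/hosts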
I1101 10:35:53.307586 635360 kubeadm.go:884] updating cluster {Name:scheduled-stop-826255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-826255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1101 10:35:53.307692 635360 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1101 10:35:53.307745 635360 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1101 10:35:53.326816 635360 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1101 10:35:53.326828 635360 docker.go:621] Images already preloaded, skipping extraction
I1101 10:35:53.326891 635360 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1101 10:35:53.348311 635360 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1101 10:35:53.348325 635360 cache_images.go:86] Images are preloaded, skipping loading
I1101 10:35:53.348333 635360 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.34.1 docker true true} ...
I1101 10:35:53.348428 635360 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-826255 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-826255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1101 10:35:53.348507 635360 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1101 10:35:53.399296 635360 cni.go:84] Creating CNI manager for ""
I1101 10:35:53.399311 635360 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1101 10:35:53.399328 635360 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1101 10:35:53.399348 635360 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-826255 NodeName:scheduled-stop-826255 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1101 10:35:53.399466 635360 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "scheduled-stop-826255"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
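[editor's note] Everything from InitConfiguration down to KubeProxyConfiguration above is scp'd to /var/tmp/minikube/kubeadm.yaml.new a few lines later and ultimately fed to kubeadm init. One way to vet a generated config like this without mutating the node is kubeadm's dry-run mode (a sketch):

  sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run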
I1101 10:35:53.399559 635360 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1101 10:35:53.407276 635360 binaries.go:44] Found k8s binaries, skipping transfer
I1101 10:35:53.407339 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1101 10:35:53.414662 635360 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I1101 10:35:53.427272 635360 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1101 10:35:53.440242 635360 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2222 bytes)
I1101 10:35:53.452546 635360 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1101 10:35:53.456258 635360 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1101 10:35:53.465734 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:35:53.588538 635360 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1101 10:35:53.605643 635360 certs.go:69] Setting up /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255 for IP: 192.168.76.2
I1101 10:35:53.605653 635360 certs.go:195] generating shared ca certs ...
I1101 10:35:53.605683 635360 certs.go:227] acquiring lock for ca certs: {Name:mk8e3a6be3dbfc79fcab32faa73ce5271301f192 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:53.605850 635360 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21830-429697/.minikube/ca.key
I1101 10:35:53.605894 635360 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21830-429697/.minikube/proxy-client-ca.key
I1101 10:35:53.605909 635360 certs.go:257] generating profile certs ...
I1101 10:35:53.605965 635360 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.key
I1101 10:35:53.605983 635360 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.crt with IP's: []
I1101 10:35:54.308333 635360 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.crt ...
I1101 10:35:54.308349 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.crt: {Name:mkff6c648dc163647468478912002f67e111c467 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.308591 635360 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.key ...
I1101 10:35:54.308602 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/client.key: {Name:mkc9b9ee6e32a3ded31fa7e76c671a55d90861ed Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.308713 635360 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key.42405298
I1101 10:35:54.308726 635360 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt.42405298 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1101 10:35:54.552262 635360 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt.42405298 ...
I1101 10:35:54.552276 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt.42405298: {Name:mk1588f84286d9fd914c46449517c49d63847752 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.552472 635360 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key.42405298 ...
I1101 10:35:54.552480 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key.42405298: {Name:mk70cf6ab83821ea0b01e8bda245d672764ca8ac Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.552566 635360 certs.go:382] copying /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt.42405298 -> /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt
I1101 10:35:54.552648 635360 certs.go:386] copying /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key.42405298 -> /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key
I1101 10:35:54.552700 635360 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.key
I1101 10:35:54.552712 635360 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.crt with IP's: []
I1101 10:35:54.753245 635360 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.crt ...
I1101 10:35:54.753262 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.crt: {Name:mk06d0f5c0b478413259646da7b6fac34de11bb5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.753450 635360 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.key ...
I1101 10:35:54.753458 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.key: {Name:mk9b7e3872bb62771ccb6ad9ea4dd35f59c7e008 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:35:54.753656 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/431538.pem (1338 bytes)
W1101 10:35:54.753692 635360 certs.go:480] ignoring /home/jenkins/minikube-integration/21830-429697/.minikube/certs/431538_empty.pem, impossibly tiny 0 bytes
I1101 10:35:54.753700 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca-key.pem (1675 bytes)
I1101 10:35:54.753725 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/ca.pem (1078 bytes)
I1101 10:35:54.753750 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/cert.pem (1123 bytes)
I1101 10:35:54.753771 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/certs/key.pem (1675 bytes)
I1101 10:35:54.753815 635360 certs.go:484] found cert: /home/jenkins/minikube-integration/21830-429697/.minikube/files/etc/ssl/certs/4315382.pem (1708 bytes)
I1101 10:35:54.754425 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1101 10:35:54.773919 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1101 10:35:54.792115 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1101 10:35:54.810388 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1101 10:35:54.827916 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1101 10:35:54.845149 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1101 10:35:54.862566 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1101 10:35:54.880514 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/profiles/scheduled-stop-826255/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1101 10:35:54.897514 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1101 10:35:54.914750 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/certs/431538.pem --> /usr/share/ca-certificates/431538.pem (1338 bytes)
I1101 10:35:54.932548 635360 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21830-429697/.minikube/files/etc/ssl/certs/4315382.pem --> /usr/share/ca-certificates/4315382.pem (1708 bytes)
I1101 10:35:54.950327 635360 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1101 10:35:54.963432 635360 ssh_runner.go:195] Run: openssl version
I1101 10:35:54.970029 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1101 10:35:54.978503 635360 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1101 10:35:54.982403 635360 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 1 09:56 /usr/share/ca-certificates/minikubeCA.pem
I1101 10:35:54.982462 635360 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1101 10:35:55.024290 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1101 10:35:55.033469 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/431538.pem && ln -fs /usr/share/ca-certificates/431538.pem /etc/ssl/certs/431538.pem"
I1101 10:35:55.042689 635360 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/431538.pem
I1101 10:35:55.046855 635360 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 1 10:02 /usr/share/ca-certificates/431538.pem
I1101 10:35:55.046914 635360 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/431538.pem
I1101 10:35:55.088699 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/431538.pem /etc/ssl/certs/51391683.0"
I1101 10:35:55.097535 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4315382.pem && ln -fs /usr/share/ca-certificates/4315382.pem /etc/ssl/certs/4315382.pem"
I1101 10:35:55.106276 635360 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4315382.pem
I1101 10:35:55.110300 635360 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 1 10:02 /usr/share/ca-certificates/4315382.pem
I1101 10:35:55.110356 635360 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4315382.pem
I1101 10:35:55.151952 635360 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/4315382.pem /etc/ssl/certs/3ec20f2e.0"
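[editor's note] The hash/symlink pairs above follow the standard OpenSSL c_rehash layout: `openssl x509 -hash` prints the subject-name hash, and a `<hash>.0` symlink under /etc/ssl/certs is what lets OpenSSL locate the CA by hash at verification time. The names b5213941.0, 51391683.0 and 3ec20f2e.0 come straight from that computation; for the first cert (a sketch mirroring the commands above):

  h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  echo "$h"   # b5213941 in this run
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"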
I1101 10:35:55.160606 635360 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1101 10:35:55.164165 635360 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1101 10:35:55.164210 635360 kubeadm.go:401] StartCluster: {Name:scheduled-stop-826255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-826255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1101 10:35:55.164320 635360 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1101 10:35:55.181615 635360 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1101 10:35:55.189502 635360 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1101 10:35:55.197464 635360 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1101 10:35:55.197537 635360 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1101 10:35:55.209135 635360 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1101 10:35:55.209145 635360 kubeadm.go:158] found existing configuration files:
I1101 10:35:55.209200 635360 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1101 10:35:55.217320 635360 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1101 10:35:55.217377 635360 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1101 10:35:55.224729 635360 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1101 10:35:55.232914 635360 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1101 10:35:55.232967 635360 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1101 10:35:55.240594 635360 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1101 10:35:55.248713 635360 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1101 10:35:55.248770 635360 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1101 10:35:55.256652 635360 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1101 10:35:55.265275 635360 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1101 10:35:55.265347 635360 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1101 10:35:55.275024 635360 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1101 10:35:55.316548 635360 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1101 10:35:55.316881 635360 kubeadm.go:319] [preflight] Running pre-flight checks
I1101 10:35:55.345869 635360 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1101 10:35:55.345936 635360 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1101 10:35:55.345972 635360 kubeadm.go:319] OS: Linux
I1101 10:35:55.346018 635360 kubeadm.go:319] CGROUPS_CPU: enabled
I1101 10:35:55.346067 635360 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1101 10:35:55.346115 635360 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1101 10:35:55.346164 635360 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1101 10:35:55.346214 635360 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1101 10:35:55.346266 635360 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1101 10:35:55.346312 635360 kubeadm.go:319] CGROUPS_PIDS: enabled
I1101 10:35:55.346361 635360 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1101 10:35:55.346409 635360 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1101 10:35:55.412512 635360 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1101 10:35:55.412619 635360 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1101 10:35:55.412713 635360 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1101 10:35:55.425717 635360 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1101 10:35:55.432242 635360 out.go:252] - Generating certificates and keys ...
I1101 10:35:55.432355 635360 kubeadm.go:319] [certs] Using existing ca certificate authority
I1101 10:35:55.432440 635360 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1101 10:35:55.630304 635360 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1101 10:35:56.487004 635360 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1101 10:35:56.800346 635360 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1101 10:35:57.071157 635360 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1101 10:35:57.804881 635360 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1101 10:35:57.805044 635360 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-826255] and IPs [192.168.76.2 127.0.0.1 ::1]
I1101 10:35:58.199055 635360 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1101 10:35:58.199581 635360 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-826255] and IPs [192.168.76.2 127.0.0.1 ::1]
I1101 10:35:58.348600 635360 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1101 10:35:58.993346 635360 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1101 10:35:59.647887 635360 kubeadm.go:319] [certs] Generating "sa" key and public key
I1101 10:35:59.648019 635360 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1101 10:35:59.951911 635360 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1101 10:36:00.036932 635360 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1101 10:36:00.894621 635360 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1101 10:36:01.527321 635360 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1101 10:36:01.979520 635360 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1101 10:36:01.980188 635360 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1101 10:36:01.991757 635360 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1101 10:36:01.995225 635360 out.go:252] - Booting up control plane ...
I1101 10:36:01.995342 635360 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1101 10:36:01.995422 635360 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1101 10:36:01.995514 635360 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1101 10:36:02.016753 635360 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1101 10:36:02.016860 635360 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1101 10:36:02.024532 635360 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1101 10:36:02.024815 635360 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1101 10:36:02.024859 635360 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1101 10:36:02.161359 635360 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1101 10:36:02.161475 635360 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1101 10:36:03.163859 635360 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.000849613s
I1101 10:36:03.166062 635360 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1101 10:36:03.166175 635360 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.76.2:8443/livez
I1101 10:36:03.166273 635360 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1101 10:36:03.166355 635360 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1101 10:36:07.247710 635360 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 4.08109738s
I1101 10:36:08.571429 635360 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 5.405303773s
I1101 10:36:09.668374 635360 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 6.50203952s
I1101 10:36:09.688942 635360 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1101 10:36:09.705101 635360 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1101 10:36:09.718708 635360 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1101 10:36:09.718913 635360 kubeadm.go:319] [mark-control-plane] Marking the node scheduled-stop-826255 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1101 10:36:09.733205 635360 kubeadm.go:319] [bootstrap-token] Using token: 8befff.325b59j6sezpueap
I1101 10:36:09.736195 635360 out.go:252] - Configuring RBAC rules ...
I1101 10:36:09.736317 635360 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1101 10:36:09.743327 635360 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1101 10:36:09.752013 635360 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1101 10:36:09.756372 635360 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1101 10:36:09.760634 635360 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1101 10:36:09.765075 635360 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1101 10:36:10.076086 635360 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1101 10:36:10.502213 635360 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1101 10:36:11.075890 635360 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1101 10:36:11.077942 635360 kubeadm.go:319]
I1101 10:36:11.078014 635360 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1101 10:36:11.078019 635360 kubeadm.go:319]
I1101 10:36:11.078099 635360 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1101 10:36:11.078102 635360 kubeadm.go:319]
I1101 10:36:11.078128 635360 kubeadm.go:319] mkdir -p $HOME/.kube
I1101 10:36:11.078554 635360 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1101 10:36:11.078608 635360 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1101 10:36:11.078612 635360 kubeadm.go:319]
I1101 10:36:11.078668 635360 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1101 10:36:11.078672 635360 kubeadm.go:319]
I1101 10:36:11.078721 635360 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1101 10:36:11.078724 635360 kubeadm.go:319]
I1101 10:36:11.078777 635360 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1101 10:36:11.078855 635360 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1101 10:36:11.078925 635360 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1101 10:36:11.078928 635360 kubeadm.go:319]
I1101 10:36:11.079191 635360 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1101 10:36:11.079284 635360 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1101 10:36:11.079289 635360 kubeadm.go:319]
I1101 10:36:11.079610 635360 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 8befff.325b59j6sezpueap \
I1101 10:36:11.079721 635360 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:1930971589d0f49c5d4addbf6ebf330a1b05f30590c314f9d841765a72b2a77f \
I1101 10:36:11.079916 635360 kubeadm.go:319] --control-plane
I1101 10:36:11.079923 635360 kubeadm.go:319]
I1101 10:36:11.080194 635360 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1101 10:36:11.080200 635360 kubeadm.go:319]
I1101 10:36:11.080502 635360 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 8befff.325b59j6sezpueap \
I1101 10:36:11.080793 635360 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:1930971589d0f49c5d4addbf6ebf330a1b05f30590c314f9d841765a72b2a77f
I1101 10:36:11.090699 635360 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I1101 10:36:11.090942 635360 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1101 10:36:11.091051 635360 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1101 10:36:11.091067 635360 cni.go:84] Creating CNI manager for ""
I1101 10:36:11.091079 635360 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1101 10:36:11.094446 635360 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1101 10:36:11.097498 635360 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1101 10:36:11.105421 635360 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1101 10:36:11.119452 635360 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1101 10:36:11.119611 635360 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1101 10:36:11.119705 635360 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-826255 minikube.k8s.io/updated_at=2025_11_01T10_36_11_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=8d0f47abe6720ae55a5722df67bba0ddd12c8845 minikube.k8s.io/name=scheduled-stop-826255 minikube.k8s.io/primary=true
I1101 10:36:11.131984 635360 ops.go:34] apiserver oom_adj: -16
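[editor's note] The oom_adj check above reads the legacy OOM knob for the apiserver process; a negative value makes the kernel's OOM killer strongly prefer other victims. The kernel maps oom_adj onto the modern oom_score_adj (roughly oom_adj * 1000 / 17, so about -941 for -16; that scaling factor is stated from memory, not from this log). The modern equivalent of the check (a sketch):

  cat /proc/$(pgrep kube-apiserver)/oom_score_adj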
I1101 10:36:11.270582 635360 kubeadm.go:1114] duration metric: took 151.02259ms to wait for elevateKubeSystemPrivileges
I1101 10:36:11.270599 635360 kubeadm.go:403] duration metric: took 16.10639262s to StartCluster
I1101 10:36:11.270615 635360 settings.go:142] acquiring lock: {Name:mkcfef1c26d30ee2825e5b52257ea9b39f17bea7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:36:11.270672 635360 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21830-429697/kubeconfig
I1101 10:36:11.271317 635360 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21830-429697/kubeconfig: {Name:mk4effc4d445e38ce239edbb077a2fdcaef89fef Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1101 10:36:11.271596 635360 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1101 10:36:11.271720 635360 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1101 10:36:11.271979 635360 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1101 10:36:11.272051 635360 addons.go:70] Setting storage-provisioner=true in profile "scheduled-stop-826255"
I1101 10:36:11.272064 635360 addons.go:239] Setting addon storage-provisioner=true in "scheduled-stop-826255"
I1101 10:36:11.272090 635360 host.go:66] Checking if "scheduled-stop-826255" exists ...
I1101 10:36:11.272170 635360 config.go:182] Loaded profile config "scheduled-stop-826255": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1101 10:36:11.272209 635360 addons.go:70] Setting default-storageclass=true in profile "scheduled-stop-826255"
I1101 10:36:11.272219 635360 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-826255"
I1101 10:36:11.272461 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:36:11.272692 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:36:11.277727 635360 out.go:179] * Verifying Kubernetes components...
I1101 10:36:11.280735 635360 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1101 10:36:11.308732 635360 addons.go:239] Setting addon default-storageclass=true in "scheduled-stop-826255"
I1101 10:36:11.308760 635360 host.go:66] Checking if "scheduled-stop-826255" exists ...
I1101 10:36:11.309183 635360 cli_runner.go:164] Run: docker container inspect scheduled-stop-826255 --format={{.State.Status}}
I1101 10:36:11.326335 635360 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1101 10:36:11.330486 635360 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1101 10:36:11.330498 635360 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1101 10:36:11.330570 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:36:11.350721 635360 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1101 10:36:11.350741 635360 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1101 10:36:11.350804 635360 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-826255
I1101 10:36:11.370895 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:36:11.389423 635360 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33363 SSHKeyPath:/home/jenkins/minikube-integration/21830-429697/.minikube/machines/scheduled-stop-826255/id_rsa Username:docker}
I1101 10:36:11.566250 635360 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
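[editor's note] The sed pipeline above patches CoreDNS's Corefile in-flight: it splices a hosts block (resolving host.minikube.internal to the gateway IP) in front of the forward plugin and a log directive in front of errors, then kubectl replace pushes the modified ConfigMap back. To inspect the result (a sketch; the expected fragment is reconstructed from the sed expressions, and surrounding Corefile lines are assumptions):

  kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
  # expect, roughly:
  #    hosts {
  #       192.168.76.1 host.minikube.internal
  #       fallthrough
  #    }
  #    forward . /etc/resolv.conf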
I1101 10:36:11.583670 635360 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1101 10:36:11.680542 635360 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1101 10:36:11.721056 635360 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1101 10:36:12.074184 635360 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1101 10:36:12.075997 635360 api_server.go:52] waiting for apiserver process to appear ...
I1101 10:36:12.076044 635360 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1101 10:36:12.279526 635360 api_server.go:72] duration metric: took 1.007905685s to wait for apiserver process to appear ...
I1101 10:36:12.279538 635360 api_server.go:88] waiting for apiserver healthz status ...
I1101 10:36:12.279555 635360 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1101 10:36:12.291713 635360 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1101 10:36:12.293108 635360 api_server.go:141] control plane version: v1.34.1
I1101 10:36:12.293124 635360 api_server.go:131] duration metric: took 13.580099ms to wait for apiserver health ...
I1101 10:36:12.293131 635360 system_pods.go:43] waiting for kube-system pods to appear ...
I1101 10:36:12.296813 635360 system_pods.go:59] 5 kube-system pods found
I1101 10:36:12.296848 635360 system_pods.go:61] "etcd-scheduled-stop-826255" [315c52fa-d05c-44e1-a3b4-a1161d62d2bc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1101 10:36:12.296856 635360 system_pods.go:61] "kube-apiserver-scheduled-stop-826255" [96e3322c-677a-415b-8541-f4b70a7fa641] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1101 10:36:12.296863 635360 system_pods.go:61] "kube-controller-manager-scheduled-stop-826255" [441f59b8-8d0b-459d-b492-cc8d5d24f182] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1101 10:36:12.296868 635360 system_pods.go:61] "kube-scheduler-scheduled-stop-826255" [a522b422-1463-4f02-8501-6d1c09ccf876] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1101 10:36:12.296876 635360 system_pods.go:61] "storage-provisioner" [dd2f679f-291b-4326-8c6f-05f1c30d186c] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I1101 10:36:12.296881 635360 system_pods.go:74] duration metric: took 3.746063ms to wait for pod list to return data ...
I1101 10:36:12.296891 635360 kubeadm.go:587] duration metric: took 1.025275277s to wait for: map[apiserver:true system_pods:true]
I1101 10:36:12.296902 635360 node_conditions.go:102] verifying NodePressure condition ...
I1101 10:36:12.315254 635360 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1101 10:36:12.315272 635360 node_conditions.go:123] node cpu capacity is 2
I1101 10:36:12.315282 635360 node_conditions.go:105] duration metric: took 18.070376ms to run NodePressure ...
I1101 10:36:12.315294 635360 start.go:242] waiting for startup goroutines ...
I1101 10:36:12.316101 635360 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1101 10:36:12.319149 635360 addons.go:515] duration metric: took 1.04715685s for enable addons: enabled=[storage-provisioner default-storageclass]
I1101 10:36:12.578097 635360 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-826255" context rescaled to 1 replicas
I1101 10:36:12.578120 635360 start.go:247] waiting for cluster config update ...
I1101 10:36:12.578130 635360 start.go:256] writing updated cluster config ...
I1101 10:36:12.578473 635360 ssh_runner.go:195] Run: rm -f paused
I1101 10:36:12.640111 635360 start.go:628] kubectl: 1.33.2, cluster: 1.34.1 (minor skew: 1)
I1101 10:36:12.643891 635360 out.go:179] * Done! kubectl is now configured to use "scheduled-stop-826255" cluster and "default" namespace by default
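The api_server.go lines above (10:36:12.279 through 10:36:12.293) record the usual readiness sequence: wait for a kube-apiserver process, then poll /healthz until it answers 200 with body "ok". A minimal sketch of that style of poll, assuming a self-signed bootstrap certificate and illustrative interval and timeout values; this is not minikube's actual implementation:

package main

import (
    "crypto/tls"
    "fmt"
    "io"
    "net/http"
    "time"
)

// waitForHealthz polls the apiserver /healthz endpoint until it returns
// HTTP 200 "ok" or the deadline passes.
func waitForHealthz(url string, timeout time.Duration) error {
    client := &http.Client{
        Timeout: 2 * time.Second,
        // The bootstrap apiserver cert is self-signed, so a bare probe
        // skips verification (assumption for this sketch).
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        },
    }
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        resp, err := client.Get(url)
        if err == nil {
            body, _ := io.ReadAll(resp.Body)
            resp.Body.Close()
            if resp.StatusCode == http.StatusOK {
                fmt.Printf("%s returned 200: %s\n", url, body)
                return nil
            }
        }
        time.Sleep(500 * time.Millisecond)
    }
    return fmt.Errorf("timed out waiting for %s", url)
}

func main() {
    if err := waitForHealthz("https://192.168.76.2:8443/healthz", time.Minute); err != nil {
        fmt.Println(err)
    }
}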
==> Docker <==
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.496370326Z" level=info msg="Loading containers: done."
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.506983030Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.507052422Z" level=info msg="Initializing buildkit"
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.525929577Z" level=info msg="Completed buildkit initialization"
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.531417060Z" level=info msg="Daemon has completed initialization"
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.531699730Z" level=info msg="API listen on /run/docker.sock"
Nov 01 10:35:52 scheduled-stop-826255 systemd[1]: Started docker.service - Docker Application Container Engine.
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.533159973Z" level=info msg="API listen on /var/run/docker.sock"
Nov 01 10:35:52 scheduled-stop-826255 dockerd[1132]: time="2025-11-01T10:35:52.533246128Z" level=info msg="API listen on [::]:2376"
Nov 01 10:35:53 scheduled-stop-826255 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine...
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Start docker client with request timeout 0s"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Hairpin mode is set to hairpin-veth"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Loaded network plugin cni"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Docker cri networking managed by network plugin cni"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Setting cgroupDriver cgroupfs"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Nov 01 10:35:53 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:35:53Z" level=info msg="Start cri-dockerd grpc backend"
Nov 01 10:35:53 scheduled-stop-826255 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine.
Nov 01 10:36:03 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:36:03Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5073f02f1dc7aceabf9f336b22791af5461c02c9920deaa9bfeb34ee4ab0b832/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Nov 01 10:36:03 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:36:03Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/8b51d125772d0d60da98e9770840ac92a2ce55cccd1852657773f573f3f620c0/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Nov 01 10:36:03 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:36:03Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b13c5984dc99049b58e0a07bb86787f3905b9198d1d3cf6075e18dbf9f1cf9a2/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options trust-ad ndots:0 edns0]"
Nov 01 10:36:03 scheduled-stop-826255 cri-dockerd[1432]: time="2025-11-01T10:36:03Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ae9c016ab1fd0af7ca1c059e89cf91753210156aef8d2c7f7c4f34696eb0c06f/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options trust-ad ndots:0 edns0]"
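The cri-dockerd entries above show each pod sandbox's resolv.conf being rewritten to point at 192.168.76.1, the same Docker network gateway address injected as host.minikube.internal earlier, so in-pod lookups reach the node's resolver. A sketch that renders the same file contents from the logged fields; the helper is illustrative, not cri-dockerd's own writer:

package main

import (
    "fmt"
    "strings"
)

// renderResolvConf assembles resolv.conf text from nameserver, search,
// and options fields like those in the log lines above.
func renderResolvConf(nameservers, searches, options []string) string {
    var b strings.Builder
    for _, ns := range nameservers {
        fmt.Fprintf(&b, "nameserver %s\n", ns)
    }
    if len(searches) > 0 {
        fmt.Fprintf(&b, "search %s\n", strings.Join(searches, " "))
    }
    if len(options) > 0 {
        fmt.Fprintf(&b, "options %s\n", strings.Join(options, " "))
    }
    return b.String()
}

func main() {
    // Field values taken from the cri-dockerd lines above.
    fmt.Print(renderResolvConf(
        []string{"192.168.76.1"},
        []string{"us-east-2.compute.internal"},
        []string{"edns0", "trust-ad", "ndots:0"},
    ))
}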
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                             NAMESPACE
37c6690fb9e74   b5f57ec6b9867   11 seconds ago   Running   kube-scheduler            0         ae9c016ab1fd0   kube-scheduler-scheduled-stop-826255            kube-system
5b44257957672   a1894772a478e   11 seconds ago   Running   etcd                      0         b13c5984dc990   etcd-scheduled-stop-826255                      kube-system
2fa2d2e5efeea   7eb2c6ff0c5a7   11 seconds ago   Running   kube-controller-manager   0         8b51d125772d0   kube-controller-manager-scheduled-stop-826255   kube-system
065bfdc705622   43911e833d64d   11 seconds ago   Running   kube-apiserver            0         5073f02f1dc7a   kube-apiserver-scheduled-stop-826255            kube-system
==> describe nodes <==
Name: scheduled-stop-826255
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=scheduled-stop-826255
kubernetes.io/os=linux
minikube.k8s.io/commit=8d0f47abe6720ae55a5722df67bba0ddd12c8845
minikube.k8s.io/name=scheduled-stop-826255
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_01T10_36_11_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 01 Nov 2025 10:36:08 +0000
Taints: node.kubernetes.io/not-ready:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: scheduled-stop-826255
AcquireTime: <unset>
RenewTime: Sat, 01 Nov 2025 10:36:10 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Sat, 01 Nov 2025 10:36:13 +0000   Sat, 01 Nov 2025 10:36:04 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Sat, 01 Nov 2025 10:36:13 +0000   Sat, 01 Nov 2025 10:36:04 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Sat, 01 Nov 2025 10:36:13 +0000   Sat, 01 Nov 2025 10:36:04 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Sat, 01 Nov 2025 10:36:13 +0000   Sat, 01 Nov 2025 10:36:13 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: scheduled-stop-826255
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
System Info:
Machine ID: ef38fbc8889a0e5f09e9dc0868f5cd19
System UUID: 86b18832-b49c-45a1-bbc8-1c27a00f980c
Boot ID: 14f7d3d5-0d17-4f68-a33a-122584094ef0
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
Non-terminated Pods: (4 in total)
Namespace     Name                                            CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                            ------------   ----------   ---------------   -------------   ---
kube-system   etcd-scheduled-stop-826255                      100m (5%)      0 (0%)       100Mi (1%)        0 (0%)          4s
kube-system   kube-apiserver-scheduled-stop-826255            250m (12%)     0 (0%)       0 (0%)            0 (0%)          4s
kube-system   kube-controller-manager-scheduled-stop-826255   200m (10%)     0 (0%)       0 (0%)            0 (0%)          4s
kube-system   kube-scheduler-scheduled-stop-826255            100m (5%)      0 (0%)       0 (0%)            0 (0%)          4s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 650m (32%)   0 (0%)
memory              100Mi (1%)   0 (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
hugepages-32Mi      0 (0%)       0 (0%)
hugepages-64Ki      0 (0%)       0 (0%)
Events:
Type      Reason                     Age                  From      Message
----      ------                     ----                 ----      -------
Normal    NodeHasSufficientPID       12s (x7 over 12s)    kubelet   Node scheduled-stop-826255 status is now: NodeHasSufficientPID
Normal    NodeAllocatableEnforced    12s                  kubelet   Updated Node Allocatable limit across pods
Normal    NodeHasSufficientMemory    11s (x8 over 12s)    kubelet   Node scheduled-stop-826255 status is now: NodeHasSufficientMemory
Normal    NodeHasNoDiskPressure      11s (x8 over 12s)    kubelet   Node scheduled-stop-826255 status is now: NodeHasNoDiskPressure
Normal    Starting                   4s                   kubelet   Starting kubelet.
Warning   CgroupV1                   4s                   kubelet   cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal    NodeAllocatableEnforced    4s                   kubelet   Updated Node Allocatable limit across pods
Normal    NodeHasSufficientMemory    4s                   kubelet   Node scheduled-stop-826255 status is now: NodeHasSufficientMemory
Normal    NodeHasNoDiskPressure      4s                   kubelet   Node scheduled-stop-826255 status is now: NodeHasNoDiskPressure
Normal    NodeHasSufficientPID       4s                   kubelet   Node scheduled-stop-826255 status is now: NodeHasSufficientPID
Normal    NodeReady                  1s                   kubelet   Node scheduled-stop-826255 status is now: NodeReady
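Note the node carried the node.kubernetes.io/not-ready:NoSchedule taint until the NodeReady event one second before this dump, which is exactly why storage-provisioner appeared as Pending/Unschedulable in the pod list earlier. A hedged client-go sketch for confirming such taints; the kubeconfig path is an assumption for illustration:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumed kubeconfig location inside the node container.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
    if err != nil {
        panic(err)
    }
    cs, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    nodes, err := cs.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    // Print every taint on every node; an unready control plane shows
    // node.kubernetes.io/not-ready with effect NoSchedule.
    for _, n := range nodes.Items {
        for _, t := range n.Spec.Taints {
            fmt.Printf("%s: %s=%s:%s\n", n.Name, t.Key, t.Value, t.Effect)
        }
    }
}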
==> dmesg <==
[Nov 1 09:55] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [5b4425795767] <==
{"level":"warn","ts":"2025-11-01T10:36:06.198109Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36074","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.231742Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36104","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.252902Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36118","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.274088Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36146","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.290106Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36150","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.306910Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36180","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.326432Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36192","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.348824Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36204","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.361550Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36216","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.385817Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36224","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.402641Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36250","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.413529Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36256","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.444290Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36278","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.454919Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36288","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.468639Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36312","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.488123Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36326","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.526745Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36350","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.537163Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36366","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.556328Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36376","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.564027Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36396","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.584765Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36410","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.632225Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36422","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.715930Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36444","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.759261Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36454","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-01T10:36:06.847055Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:36464","server-name":"","error":"EOF"}
==> kernel <==
10:36:14 up 2:18, 0 user, load average: 3.05, 2.60, 2.90
Linux scheduled-stop-826255 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kube-apiserver [065bfdc70562] <==
I1101 10:36:07.968799 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I1101 10:36:08.010123 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]"
I1101 10:36:08.010167 1 policy_source.go:240] refreshing policies
E1101 10:36:08.022737 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
E1101 10:36:08.051603 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1101 10:36:08.063783 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
I1101 10:36:08.064516 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True
I1101 10:36:08.072105 1 controller.go:667] quota admission added evaluator for: namespaces
I1101 10:36:08.099198 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I1101 10:36:08.103157 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller
E1101 10:36:08.115967 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I1101 10:36:08.262097 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I1101 10:36:08.670330 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1101 10:36:08.677635 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1101 10:36:08.678684 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1101 10:36:09.450355 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1101 10:36:09.506490 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1101 10:36:09.579587 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1101 10:36:09.590160 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1101 10:36:09.591706 1 controller.go:667] quota admission added evaluator for: endpoints
I1101 10:36:09.601493 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1101 10:36:09.892440 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I1101 10:36:10.486340 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1101 10:36:10.501081 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1101 10:36:10.512737 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
==> kube-controller-manager [2fa2d2e5efee] <==
I1101 10:36:13.490847 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client"
I1101 10:36:13.490855 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kube-apiserver-client"
I1101 10:36:13.490878 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown"
I1101 10:36:13.490885 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-legacy-unknown"
I1101 10:36:13.490899 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key"
I1101 10:36:13.491025 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key"
I1101 10:36:13.491097 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key"
I1101 10:36:13.491161 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key"
I1101 10:36:13.644455 1 controllermanager.go:781] "Started controller" controller="token-cleaner-controller"
I1101 10:36:13.644534 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller"
I1101 10:36:13.644546 1 shared_informer.go:349] "Waiting for caches to sync" controller="token_cleaner"
I1101 10:36:13.644553 1 shared_informer.go:356] "Caches are synced" controller="token_cleaner"
I1101 10:36:13.790024 1 controllermanager.go:781] "Started controller" controller="resourceclaim-controller"
I1101 10:36:13.790052 1 controllermanager.go:733] "Controller is disabled by a feature gate" controller="device-taint-eviction-controller" requiredFeatureGates=["DynamicResourceAllocation","DRADeviceTaints"]
I1101 10:36:13.790112 1 controller.go:397] "Starting resource claim controller" logger="resourceclaim-controller"
I1101 10:36:13.790159 1 shared_informer.go:349] "Waiting for caches to sync" controller="resource_claim"
I1101 10:36:14.038972 1 controllermanager.go:781] "Started controller" controller="garbage-collector-controller"
I1101 10:36:14.039155 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector"
I1101 10:36:14.039170 1 shared_informer.go:349] "Waiting for caches to sync" controller="garbage collector"
I1101 10:36:14.039190 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder"
I1101 10:36:14.291908 1 controllermanager.go:781] "Started controller" controller="daemonset-controller"
I1101 10:36:14.292026 1 daemon_controller.go:310] "Starting daemon sets controller" logger="daemonset-controller"
I1101 10:36:14.292040 1 shared_informer.go:349] "Waiting for caches to sync" controller="daemon sets"
I1101 10:36:14.338295 1 controllermanager.go:781] "Started controller" controller="certificatesigningrequest-cleaner-controller"
I1101 10:36:14.338353 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller"
==> kube-scheduler [37c6690fb9e7] <==
I1101 10:36:08.557068 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1101 10:36:08.561886 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1101 10:36:08.561941 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1101 10:36:08.562770 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1101 10:36:08.562953 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E1101 10:36:08.573295 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1101 10:36:08.573705 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1101 10:36:08.574007 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1101 10:36:08.574235 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1101 10:36:08.574510 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1101 10:36:08.574773 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1101 10:36:08.575032 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
E1101 10:36:08.576107 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1101 10:36:08.576415 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1101 10:36:08.576691 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1101 10:36:08.576921 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1101 10:36:08.577140 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1101 10:36:08.578844 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1101 10:36:08.579560 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1101 10:36:08.579737 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1101 10:36:08.579608 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1101 10:36:08.580091 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1101 10:36:08.580417 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1101 10:36:08.580767 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
I1101 10:36:09.662500 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
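The burst of "Failed to watch ... forbidden" errors above is a routine bootstrap race rather than a fault: the scheduler's informers start before its RBAC bindings are visible, and each reflector simply retries with backoff until the final "Caches are synced" line. A toy loop of that shape, with assumed retry counts and delays, not client-go's actual reflector:

package main

import (
    "errors"
    "fmt"
    "time"
)

var errForbidden = errors.New("forbidden")

// listWithRetry retries a list call with doubling backoff while the
// server answers "forbidden", mirroring the pattern in the log above.
func listWithRetry(list func() error) error {
    backoff := 100 * time.Millisecond
    for attempt := 1; attempt <= 10; attempt++ {
        err := list()
        if err == nil {
            return nil
        }
        if !errors.Is(err, errForbidden) {
            return err
        }
        fmt.Printf("attempt %d forbidden, retrying in %s\n", attempt, backoff)
        time.Sleep(backoff)
        backoff *= 2
    }
    return errors.New("gave up waiting for RBAC to propagate")
}

func main() {
    calls := 0
    err := listWithRetry(func() error {
        calls++
        if calls < 3 {
            return errForbidden // RBAC bindings not yet visible
        }
        return nil // caches can sync now
    })
    fmt.Println("synced:", err == nil, "after", calls, "attempts")
}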
==> kubelet <==
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835725 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-ca-certs\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835749 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-etc-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835772 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835796 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-k8s-certs\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835814 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835834 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/002f89339d395aef847c068bd10c6624-etcd-certs\") pod \"etcd-scheduled-stop-826255\" (UID: \"002f89339d395aef847c068bd10c6624\") " pod="kube-system/etcd-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835853 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0d1ea222379fced1088b0288ddc5c342-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-826255\" (UID: \"0d1ea222379fced1088b0288ddc5c342\") " pod="kube-system/kube-apiserver-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835878 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-usr-local-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835898 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/2dc2b8c66d0d2c19e80ef3616756d9ef-usr-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-826255\" (UID: \"2dc2b8c66d0d2c19e80ef3616756d9ef\") " pod="kube-system/kube-controller-manager-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835915 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/31cef7d499b46934a1c2d33cb5e0a306-kubeconfig\") pod \"kube-scheduler-scheduled-stop-826255\" (UID: \"31cef7d499b46934a1c2d33cb5e0a306\") " pod="kube-system/kube-scheduler-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835933 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/0d1ea222379fced1088b0288ddc5c342-ca-certs\") pod \"kube-apiserver-scheduled-stop-826255\" (UID: \"0d1ea222379fced1088b0288ddc5c342\") " pod="kube-system/kube-apiserver-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835950 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/002f89339d395aef847c068bd10c6624-etcd-data\") pod \"etcd-scheduled-stop-826255\" (UID: \"002f89339d395aef847c068bd10c6624\") " pod="kube-system/etcd-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835966 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0d1ea222379fced1088b0288ddc5c342-etc-ca-certificates\") pod \"kube-apiserver-scheduled-stop-826255\" (UID: \"0d1ea222379fced1088b0288ddc5c342\") " pod="kube-system/kube-apiserver-scheduled-stop-826255"
Nov 01 10:36:10 scheduled-stop-826255 kubelet[2256]: I1101 10:36:10.835985 2256 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0d1ea222379fced1088b0288ddc5c342-usr-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-826255\" (UID: \"0d1ea222379fced1088b0288ddc5c342\") " pod="kube-system/kube-apiserver-scheduled-stop-826255"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.411596 2256 apiserver.go:52] "Watching apiserver"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.432909 2256 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.524810 2256 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-826255" podStartSLOduration=1.5247895360000001 podStartE2EDuration="1.524789536s" podCreationTimestamp="2025-11-01 10:36:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 10:36:11.504430263 +0000 UTC m=+1.184458484" watchObservedRunningTime="2025-11-01 10:36:11.524789536 +0000 UTC m=+1.204817765"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.540929 2256 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-826255" podStartSLOduration=1.540912008 podStartE2EDuration="1.540912008s" podCreationTimestamp="2025-11-01 10:36:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 10:36:11.525923137 +0000 UTC m=+1.205951358" watchObservedRunningTime="2025-11-01 10:36:11.540912008 +0000 UTC m=+1.220940221"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.563932 2256 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-826255"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.564330 2256 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-scheduled-stop-826255"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.581742 2256 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-826255" podStartSLOduration=1.581722773 podStartE2EDuration="1.581722773s" podCreationTimestamp="2025-11-01 10:36:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 10:36:11.547264141 +0000 UTC m=+1.227292370" watchObservedRunningTime="2025-11-01 10:36:11.581722773 +0000 UTC m=+1.261750986"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: E1101 10:36:11.582086 2256 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-scheduled-stop-826255\" already exists" pod="kube-system/kube-scheduler-scheduled-stop-826255"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: E1101 10:36:11.584012 2256 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-826255\" already exists" pod="kube-system/etcd-scheduled-stop-826255"
Nov 01 10:36:11 scheduled-stop-826255 kubelet[2256]: I1101 10:36:11.604206 2256 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-826255" podStartSLOduration=1.604189345 podStartE2EDuration="1.604189345s" podCreationTimestamp="2025-11-01 10:36:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-01 10:36:11.582056635 +0000 UTC m=+1.262084848" watchObservedRunningTime="2025-11-01 10:36:11.604189345 +0000 UTC m=+1.284217558"
Nov 01 10:36:13 scheduled-stop-826255 kubelet[2256]: I1101 10:36:13.234731 2256 kubelet_node_status.go:439] "Fast updating node status as it just became ready"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p scheduled-stop-826255 -n scheduled-stop-826255
helpers_test.go:269: (dbg) Run: kubectl --context scheduled-stop-826255 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: storage-provisioner
helpers_test.go:282: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context scheduled-stop-826255 describe pod storage-provisioner
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context scheduled-stop-826255 describe pod storage-provisioner: exit status 1 (93.380438ms)
** stderr **
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:287: kubectl --context scheduled-stop-826255 describe pod storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "scheduled-stop-826255" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p scheduled-stop-826255
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-826255: (2.219955685s)
--- FAIL: TestScheduledStopUnix (38.46s)
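For reference, the "(dbg) Run:" / "(dbg) Done:" lines throughout this log follow a simple run-and-time pattern: shell out, capture combined output, and report the elapsed duration. A minimal sketch of that shape; the function name and output format are assumptions, not the harness's actual helpers:

package main

import (
    "fmt"
    "os/exec"
    "time"
)

// dbgRun executes a command, captures combined stdout/stderr, and logs
// the invocation and elapsed time in the style seen throughout this log.
func dbgRun(name string, args ...string) (string, error) {
    fmt.Printf("(dbg) Run:  %s %v\n", name, args)
    start := time.Now()
    out, err := exec.Command(name, args...).CombinedOutput()
    fmt.Printf("(dbg) Done: %s %v: (%s)\n", name, args, time.Since(start))
    return string(out), err
}

func main() {
    out, err := dbgRun("kubectl", "--context", "scheduled-stop-826255",
        "get", "po", "-A", "--field-selector=status.phase!=Running")
    if err != nil {
        fmt.Println("non-zero exit:", err)
    }
    fmt.Print(out)
}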