=== RUN TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-arm64 start -p scheduled-stop-243584 --memory=2048 --driver=docker --container-runtime=docker
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-243584 --memory=2048 --driver=docker --container-runtime=docker: (32.194492428s)
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-243584 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-243584 -n scheduled-stop-243584
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-243584 --schedule 15s
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:98: process 1327554 running but should have been killed on reschedule of stop
panic.go:631: *** TestScheduledStopUnix FAILED at 2025-03-17 13:51:54.231214131 +0000 UTC m=+2219.191148020
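The failure above comes down to a process-liveness assertion: once a later `stop --schedule` supersedes an earlier one, the previously daemonized scheduled-stop process (pid 1327554 here) should no longer exist, yet it was still running. As a rough illustration only (not the test's actual helper), a Unix liveness check of that kind can be sketched in Go by signalling the pid with signal 0:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// processAlive reports whether pid still refers to a process this user can
// signal. Signal 0 performs the existence check without delivering anything:
// a nil error means the process is still there, while os.ErrProcessDone (or
// a permission error) means it is gone or unreachable.
func processAlive(pid int) bool {
	p, err := os.FindProcess(pid) // always succeeds on Unix
	if err != nil {
		return false
	}
	return p.Signal(syscall.Signal(0)) == nil
}

func main() {
	// pid taken from the failure message above; after a reschedule the test
	// expects this to report false.
	fmt.Println(processAlive(1327554))
}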
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect scheduled-stop-243584
helpers_test.go:235: (dbg) docker inspect scheduled-stop-243584:
-- stdout --
[
{
"Id": "0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c",
"Created": "2025-03-17T13:51:26.414413002Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1324594,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-03-17T13:51:26.47744243Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:df0c2544fb3106b890f0a9ab81fcf49f97edb092b83e47f42288ad5dfe1f4b40",
"ResolvConfPath": "/var/lib/docker/containers/0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c/hostname",
"HostsPath": "/var/lib/docker/containers/0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c/hosts",
"LogPath": "/var/lib/docker/containers/0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c/0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c-json.log",
"Name": "/scheduled-stop-243584",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"scheduled-stop-243584:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "scheduled-stop-243584",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2147483648,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4294967296,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "0a4a4e7d230b278a6616dec5b3575d06c04c5a58dfa7bf70f80e01e5d01b244c",
"LowerDir": "/var/lib/docker/overlay2/b628189b32978c86b31fcf50aff1b4ff565d37c56cd44716d00dd5fb6010c34d-init/diff:/var/lib/docker/overlay2/41521760173e9c0e383fdb1e0e82a24e9241667b8273679076afa7a5eb322b96/diff",
"MergedDir": "/var/lib/docker/overlay2/b628189b32978c86b31fcf50aff1b4ff565d37c56cd44716d00dd5fb6010c34d/merged",
"UpperDir": "/var/lib/docker/overlay2/b628189b32978c86b31fcf50aff1b4ff565d37c56cd44716d00dd5fb6010c34d/diff",
"WorkDir": "/var/lib/docker/overlay2/b628189b32978c86b31fcf50aff1b4ff565d37c56cd44716d00dd5fb6010c34d/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "scheduled-stop-243584",
"Source": "/var/lib/docker/volumes/scheduled-stop-243584/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "scheduled-stop-243584",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "scheduled-stop-243584",
"name.minikube.sigs.k8s.io": "scheduled-stop-243584",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "eb6783fb2afdd78c72bee253e8f1a35888eff88c801521a188692a5338b7a7ef",
"SandboxKey": "/var/run/docker/netns/eb6783fb2afd",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33941"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33942"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33945"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33943"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33944"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"scheduled-stop-243584": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "ee:70:d4:0a:b8:67",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "07dc9aa7fca7da570fa760917bbf5240191ee1c3e25701324ad7ed7fa1692f54",
"EndpointID": "909cce6b38f9f0db2f963cbdf018b22e9b87edbfd753deb290a0263ba256905a",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"scheduled-stop-243584",
"0a4a4e7d230b"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-243584 -n scheduled-stop-243584
helpers_test.go:244: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestScheduledStopUnix]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p scheduled-stop-243584 logs -n 25
helpers_test.go:252: TestScheduledStopUnix logs:
-- stdout --
==> Audit <==
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
| stop | -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:46 UTC | 17 Mar 25 13:46 UTC |
| start | -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:46 UTC | 17 Mar 25 13:47 UTC |
| | --wait=true -v=8 | | | | | |
| | --alsologtostderr | | | | | |
| node | list -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:47 UTC | |
| node | multinode-309686 node delete | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:47 UTC | 17 Mar 25 13:47 UTC |
| | m03 | | | | | |
| stop | multinode-309686 stop | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:47 UTC | 17 Mar 25 13:47 UTC |
| start | -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:47 UTC | 17 Mar 25 13:48 UTC |
| | --wait=true -v=8 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| node | list -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:48 UTC | |
| start | -p multinode-309686-m02 | multinode-309686-m02 | jenkins | v1.35.0 | 17 Mar 25 13:48 UTC | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| start | -p multinode-309686-m03 | multinode-309686-m03 | jenkins | v1.35.0 | 17 Mar 25 13:48 UTC | 17 Mar 25 13:49 UTC |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| node | add -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:49 UTC | |
| delete | -p multinode-309686-m03 | multinode-309686-m03 | jenkins | v1.35.0 | 17 Mar 25 13:49 UTC | 17 Mar 25 13:49 UTC |
| delete | -p multinode-309686 | multinode-309686 | jenkins | v1.35.0 | 17 Mar 25 13:49 UTC | 17 Mar 25 13:49 UTC |
| start | -p test-preload-700680 | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:49 UTC | 17 Mar 25 13:50 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr | | | | | |
| | --wait=true --preload=false | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --kubernetes-version=v1.24.4 | | | | | |
| image | test-preload-700680 image pull | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:50 UTC | 17 Mar 25 13:50 UTC |
| | gcr.io/k8s-minikube/busybox | | | | | |
| stop | -p test-preload-700680 | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:50 UTC | 17 Mar 25 13:50 UTC |
| start | -p test-preload-700680 | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:50 UTC | 17 Mar 25 13:51 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| | --wait=true --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| image | test-preload-700680 image list | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | 17 Mar 25 13:51 UTC |
| delete | -p test-preload-700680 | test-preload-700680 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | 17 Mar 25 13:51 UTC |
| start | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | 17 Mar 25 13:51 UTC |
| | --memory=2048 --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-243584 | scheduled-stop-243584 | jenkins | v1.35.0 | 17 Mar 25 13:51 UTC | |
| | --schedule 15s | | | | | |
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
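The audit table above shows the stop being rescheduled repeatedly (three `--schedule 5m` runs followed by three `--schedule 15s` runs); each reschedule is expected to terminate the previously daemonized stop process before recording the new schedule, which is exactly what did not happen for pid 1327554. A minimal, hypothetical sketch of that "kill the previous schedule" step (the pid-file path and function name are illustrative, not minikube's implementation) might look like:

package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// killPreviousScheduledStop reads the pid recorded by an earlier scheduled stop
// and terminates that process so that only the newest schedule stays active.
// The pid-file location is hypothetical; minikube tracks this state per profile.
func killPreviousScheduledStop(pidFile string) error {
	data, err := os.ReadFile(pidFile)
	if errors.Is(err, os.ErrNotExist) {
		return nil // nothing scheduled yet
	}
	if err != nil {
		return err
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return fmt.Errorf("bad pid file %q: %w", pidFile, err)
	}
	p, err := os.FindProcess(pid) // always succeeds on Unix
	if err != nil {
		return err
	}
	if err := p.Signal(syscall.SIGTERM); err != nil && !errors.Is(err, os.ErrProcessDone) {
		return err // ErrProcessDone just means the old process already exited
	}
	return os.Remove(pidFile)
}

func main() {
	if err := killPreviousScheduledStop("/tmp/scheduled-stop.pid"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}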
==> Last Start <==
Log file created at: 2025/03/17 13:51:21
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.24.0 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0317 13:51:21.576491 1324214 out.go:345] Setting OutFile to fd 1 ...
I0317 13:51:21.576601 1324214 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0317 13:51:21.576604 1324214 out.go:358] Setting ErrFile to fd 2...
I0317 13:51:21.576608 1324214 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0317 13:51:21.576868 1324214 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20539-1115410/.minikube/bin
I0317 13:51:21.577246 1324214 out.go:352] Setting JSON to false
I0317 13:51:21.578108 1324214 start.go:129] hostinfo: {"hostname":"ip-172-31-31-251","uptime":34432,"bootTime":1742185049,"procs":199,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1077-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I0317 13:51:21.578159 1324214 start.go:139] virtualization:
I0317 13:51:21.581845 1324214 out.go:177] * [scheduled-stop-243584] minikube v1.35.0 on Ubuntu 20.04 (arm64)
I0317 13:51:21.586349 1324214 out.go:177] - MINIKUBE_LOCATION=20539
I0317 13:51:21.586459 1324214 notify.go:220] Checking for updates...
I0317 13:51:21.592869 1324214 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0317 13:51:21.596066 1324214 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20539-1115410/kubeconfig
I0317 13:51:21.599062 1324214 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20539-1115410/.minikube
I0317 13:51:21.602088 1324214 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0317 13:51:21.605013 1324214 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0317 13:51:21.608141 1324214 driver.go:394] Setting default libvirt URI to qemu:///system
I0317 13:51:21.631095 1324214 docker.go:123] docker version: linux-28.0.1:Docker Engine - Community
I0317 13:51:21.631230 1324214 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0317 13:51:21.689819 1324214 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-03-17 13:51:21.680744375 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1077-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.0.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.21.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.33.1]] Warnings:<nil>}}
I0317 13:51:21.689911 1324214 docker.go:318] overlay module found
I0317 13:51:21.693117 1324214 out.go:177] * Using the docker driver based on user configuration
I0317 13:51:21.695949 1324214 start.go:297] selected driver: docker
I0317 13:51:21.695960 1324214 start.go:901] validating driver "docker" against <nil>
I0317 13:51:21.695971 1324214 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0317 13:51:21.696675 1324214 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0317 13:51:21.762007 1324214 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:23 OomKillDisable:true NGoroutines:42 SystemTime:2025-03-17 13:51:21.753107103 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1077-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.0.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.21.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.33.1]] Warnings:<nil>}}
I0317 13:51:21.762146 1324214 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0317 13:51:21.762372 1324214 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
I0317 13:51:21.765376 1324214 out.go:177] * Using Docker driver with root privileges
I0317 13:51:21.768231 1324214 cni.go:84] Creating CNI manager for ""
I0317 13:51:21.768296 1324214 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0317 13:51:21.768304 1324214 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0317 13:51:21.768380 1324214 start.go:340] cluster config:
{Name:scheduled-stop-243584 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:scheduled-stop-243584 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0317 13:51:21.771569 1324214 out.go:177] * Starting "scheduled-stop-243584" primary control-plane node in "scheduled-stop-243584" cluster
I0317 13:51:21.774410 1324214 cache.go:121] Beginning downloading kic base image for docker with docker
I0317 13:51:21.777276 1324214 out.go:177] * Pulling base image v0.0.46-1741860993-20523 ...
I0317 13:51:21.780058 1324214 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime docker
I0317 13:51:21.780107 1324214 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20539-1115410/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-docker-overlay2-arm64.tar.lz4
I0317 13:51:21.780112 1324214 cache.go:56] Caching tarball of preloaded images
I0317 13:51:21.780145 1324214 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 in local docker daemon
I0317 13:51:21.780202 1324214 preload.go:172] Found /home/jenkins/minikube-integration/20539-1115410/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0317 13:51:21.780211 1324214 cache.go:59] Finished verifying existence of preloaded tar for v1.32.2 on docker
I0317 13:51:21.780534 1324214 profile.go:143] Saving config to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/config.json ...
I0317 13:51:21.780552 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/config.json: {Name:mk752da9b04f855539078e866057e6088edc9e4e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:21.797906 1324214 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 in local docker daemon, skipping pull
I0317 13:51:21.797919 1324214 cache.go:145] gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 exists in daemon, skipping load
I0317 13:51:21.797949 1324214 cache.go:230] Successfully downloaded all kic artifacts
I0317 13:51:21.797980 1324214 start.go:360] acquireMachinesLock for scheduled-stop-243584: {Name:mk1910180da4c9169938285321ea1aa7f80b087b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0317 13:51:21.798097 1324214 start.go:364] duration metric: took 102.047µs to acquireMachinesLock for "scheduled-stop-243584"
I0317 13:51:21.798123 1324214 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-243584 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:scheduled-stop-243584 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}
I0317 13:51:21.798198 1324214 start.go:125] createHost starting for "" (driver="docker")
I0317 13:51:21.801558 1324214 out.go:235] * Creating docker container (CPUs=2, Memory=2048MB) ...
I0317 13:51:21.801853 1324214 start.go:159] libmachine.API.Create for "scheduled-stop-243584" (driver="docker")
I0317 13:51:21.801885 1324214 client.go:168] LocalClient.Create starting
I0317 13:51:21.801957 1324214 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem
I0317 13:51:21.801990 1324214 main.go:141] libmachine: Decoding PEM data...
I0317 13:51:21.802001 1324214 main.go:141] libmachine: Parsing certificate...
I0317 13:51:21.802053 1324214 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/cert.pem
I0317 13:51:21.802068 1324214 main.go:141] libmachine: Decoding PEM data...
I0317 13:51:21.802079 1324214 main.go:141] libmachine: Parsing certificate...
I0317 13:51:21.802431 1324214 cli_runner.go:164] Run: docker network inspect scheduled-stop-243584 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0317 13:51:21.819197 1324214 cli_runner.go:211] docker network inspect scheduled-stop-243584 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0317 13:51:21.819264 1324214 network_create.go:284] running [docker network inspect scheduled-stop-243584] to gather additional debugging logs...
I0317 13:51:21.819279 1324214 cli_runner.go:164] Run: docker network inspect scheduled-stop-243584
W0317 13:51:21.835437 1324214 cli_runner.go:211] docker network inspect scheduled-stop-243584 returned with exit code 1
I0317 13:51:21.835457 1324214 network_create.go:287] error running [docker network inspect scheduled-stop-243584]: docker network inspect scheduled-stop-243584: exit status 1
stdout:
[]
stderr:
Error response from daemon: network scheduled-stop-243584 not found
I0317 13:51:21.835470 1324214 network_create.go:289] output of [docker network inspect scheduled-stop-243584]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network scheduled-stop-243584 not found
** /stderr **
I0317 13:51:21.835593 1324214 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0317 13:51:21.851543 1324214 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-509972d2f15a IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:ee:dc:aa:ea:d5} reservation:<nil>}
I0317 13:51:21.851903 1324214 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-c97a9322feda IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:76:dc:3b:ec:43:f8} reservation:<nil>}
I0317 13:51:21.852164 1324214 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-b4002cd7e09a IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:9a:23:b7:f0:11:bd} reservation:<nil>}
I0317 13:51:21.852535 1324214 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x400188fed0}
I0317 13:51:21.852553 1324214 network_create.go:124] attempt to create docker network scheduled-stop-243584 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0317 13:51:21.852611 1324214 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-243584 scheduled-stop-243584
I0317 13:51:21.908358 1324214 network_create.go:108] docker network scheduled-stop-243584 192.168.76.0/24 created
I0317 13:51:21.908382 1324214 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-243584" container
I0317 13:51:21.908504 1324214 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0317 13:51:21.923325 1324214 cli_runner.go:164] Run: docker volume create scheduled-stop-243584 --label name.minikube.sigs.k8s.io=scheduled-stop-243584 --label created_by.minikube.sigs.k8s.io=true
I0317 13:51:21.941649 1324214 oci.go:103] Successfully created a docker volume scheduled-stop-243584
I0317 13:51:21.941748 1324214 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-243584-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-243584 --entrypoint /usr/bin/test -v scheduled-stop-243584:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 -d /var/lib
I0317 13:51:22.460606 1324214 oci.go:107] Successfully prepared a docker volume scheduled-stop-243584
I0317 13:51:22.460651 1324214 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime docker
I0317 13:51:22.460669 1324214 kic.go:194] Starting extracting preloaded images to volume ...
I0317 13:51:22.460746 1324214 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20539-1115410/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-243584:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 -I lz4 -xf /preloaded.tar -C /extractDir
I0317 13:51:26.345905 1324214 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20539-1115410/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.2-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-243584:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 -I lz4 -xf /preloaded.tar -C /extractDir: (3.88512338s)
I0317 13:51:26.345933 1324214 kic.go:203] duration metric: took 3.885260151s to extract preloaded images to volume ...
W0317 13:51:26.346077 1324214 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0317 13:51:26.346178 1324214 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0317 13:51:26.398909 1324214 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-243584 --name scheduled-stop-243584 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-243584 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-243584 --network scheduled-stop-243584 --ip 192.168.76.2 --volume scheduled-stop-243584:/var --security-opt apparmor=unconfined --memory=2048mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185
I0317 13:51:26.682078 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Running}}
I0317 13:51:26.704574 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:26.731362 1324214 cli_runner.go:164] Run: docker exec scheduled-stop-243584 stat /var/lib/dpkg/alternatives/iptables
I0317 13:51:26.781070 1324214 oci.go:144] the created container "scheduled-stop-243584" has a running status.
I0317 13:51:26.781098 1324214 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa...
I0317 13:51:27.516125 1324214 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0317 13:51:27.552514 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:27.575048 1324214 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0317 13:51:27.575060 1324214 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-243584 chown docker:docker /home/docker/.ssh/authorized_keys]
I0317 13:51:27.637071 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:27.661135 1324214 machine.go:93] provisionDockerMachine start ...
I0317 13:51:27.661216 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:27.685153 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:27.685531 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:27.685540 1324214 main.go:141] libmachine: About to run SSH command:
hostname
I0317 13:51:27.823374 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-243584
I0317 13:51:27.823388 1324214 ubuntu.go:169] provisioning hostname "scheduled-stop-243584"
I0317 13:51:27.823450 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:27.841943 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:27.842278 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:27.842288 1324214 main.go:141] libmachine: About to run SSH command:
sudo hostname scheduled-stop-243584 && echo "scheduled-stop-243584" | sudo tee /etc/hostname
I0317 13:51:27.984664 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-243584
I0317 13:51:27.984732 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:28.006686 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:28.007007 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:28.007023 1324214 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sscheduled-stop-243584' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-243584/g' /etc/hosts;
else
echo '127.0.1.1 scheduled-stop-243584' | sudo tee -a /etc/hosts;
fi
fi
I0317 13:51:28.132013 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0317 13:51:28.132028 1324214 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20539-1115410/.minikube CaCertPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20539-1115410/.minikube}
I0317 13:51:28.132045 1324214 ubuntu.go:177] setting up certificates
I0317 13:51:28.132054 1324214 provision.go:84] configureAuth start
I0317 13:51:28.132111 1324214 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-243584
I0317 13:51:28.150525 1324214 provision.go:143] copyHostCerts
I0317 13:51:28.150587 1324214 exec_runner.go:144] found /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.pem, removing ...
I0317 13:51:28.150595 1324214 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.pem
I0317 13:51:28.150672 1324214 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.pem (1082 bytes)
I0317 13:51:28.150766 1324214 exec_runner.go:144] found /home/jenkins/minikube-integration/20539-1115410/.minikube/cert.pem, removing ...
I0317 13:51:28.150770 1324214 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20539-1115410/.minikube/cert.pem
I0317 13:51:28.150794 1324214 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20539-1115410/.minikube/cert.pem (1123 bytes)
I0317 13:51:28.150868 1324214 exec_runner.go:144] found /home/jenkins/minikube-integration/20539-1115410/.minikube/key.pem, removing ...
I0317 13:51:28.150872 1324214 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20539-1115410/.minikube/key.pem
I0317 13:51:28.150901 1324214 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20539-1115410/.minikube/key.pem (1675 bytes)
I0317 13:51:28.150952 1324214 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca-key.pem org=jenkins.scheduled-stop-243584 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-243584]
I0317 13:51:28.455009 1324214 provision.go:177] copyRemoteCerts
I0317 13:51:28.455068 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0317 13:51:28.455108 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:28.472247 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:28.564963 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0317 13:51:28.591329 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I0317 13:51:28.615429 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0317 13:51:28.640344 1324214 provision.go:87] duration metric: took 508.275605ms to configureAuth
I0317 13:51:28.640363 1324214 ubuntu.go:193] setting minikube options for container-runtime
I0317 13:51:28.640550 1324214 config.go:182] Loaded profile config "scheduled-stop-243584": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.2
I0317 13:51:28.640607 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:28.657948 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:28.658254 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:28.658261 1324214 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0317 13:51:28.780311 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0317 13:51:28.780322 1324214 ubuntu.go:71] root file system type: overlay
I0317 13:51:28.780428 1324214 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0317 13:51:28.780494 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:28.798198 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:28.798503 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:28.798577 1324214 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0317 13:51:28.932110 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0317 13:51:28.932185 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:28.950033 1324214 main.go:141] libmachine: Using SSH client type: native
I0317 13:51:28.950345 1324214 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e66c0] 0x3e8e80 <nil> [] 0s} 127.0.0.1 33941 <nil> <nil>}
I0317 13:51:28.950364 1324214 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0317 13:51:29.740332 1324214 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-02-26 10:39:24.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-03-17 13:51:28.926136675 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0317 13:51:29.740354 1324214 machine.go:96] duration metric: took 2.079207247s to provisionDockerMachine
I0317 13:51:29.740363 1324214 client.go:171] duration metric: took 7.938474199s to LocalClient.Create
I0317 13:51:29.740375 1324214 start.go:167] duration metric: took 7.938523314s to libmachine.API.Create "scheduled-stop-243584"
I0317 13:51:29.740381 1324214 start.go:293] postStartSetup for "scheduled-stop-243584" (driver="docker")
I0317 13:51:29.740389 1324214 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0317 13:51:29.740447 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0317 13:51:29.740499 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:29.758732 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:29.853338 1324214 ssh_runner.go:195] Run: cat /etc/os-release
I0317 13:51:29.856471 1324214 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0317 13:51:29.856492 1324214 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0317 13:51:29.856505 1324214 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0317 13:51:29.856511 1324214 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0317 13:51:29.856519 1324214 filesync.go:126] Scanning /home/jenkins/minikube-integration/20539-1115410/.minikube/addons for local assets ...
I0317 13:51:29.856581 1324214 filesync.go:126] Scanning /home/jenkins/minikube-integration/20539-1115410/.minikube/files for local assets ...
I0317 13:51:29.856661 1324214 filesync.go:149] local asset: /home/jenkins/minikube-integration/20539-1115410/.minikube/files/etc/ssl/certs/11207312.pem -> 11207312.pem in /etc/ssl/certs
I0317 13:51:29.856769 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0317 13:51:29.865418 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/files/etc/ssl/certs/11207312.pem --> /etc/ssl/certs/11207312.pem (1708 bytes)
I0317 13:51:29.889809 1324214 start.go:296] duration metric: took 149.414444ms for postStartSetup
I0317 13:51:29.890218 1324214 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-243584
I0317 13:51:29.907137 1324214 profile.go:143] Saving config to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/config.json ...
I0317 13:51:29.907410 1324214 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0317 13:51:29.907448 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:29.924253 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:30.022605 1324214 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0317 13:51:30.028737 1324214 start.go:128] duration metric: took 8.230523187s to createHost
I0317 13:51:30.028754 1324214 start.go:83] releasing machines lock for "scheduled-stop-243584", held for 8.230649185s
I0317 13:51:30.028839 1324214 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-243584
I0317 13:51:30.050966 1324214 ssh_runner.go:195] Run: cat /version.json
I0317 13:51:30.051013 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:30.051331 1324214 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0317 13:51:30.051400 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:30.073997 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:30.092577 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:30.163583 1324214 ssh_runner.go:195] Run: systemctl --version
I0317 13:51:30.313164 1324214 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0317 13:51:30.317879 1324214 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0317 13:51:30.347620 1324214 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0317 13:51:30.347696 1324214 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0317 13:51:30.381512 1324214 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
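[editor's note] The two find/sed invocations above leave the loopback CNI config with an explicit "name" and a cniVersion pinned to 1.0.0, and rename any bridge/podman configs with a .mk_disabled suffix so only minikube's own CNI config is active. A sketch of what to expect on the node afterwards (the exact loopback filename matched by *loopback.conf* varies; the JSON shape shown in comments is illustrative, not copied from this run):

  # List active and disabled CNI configs
  ls -la /etc/cni/net.d
  # The patched loopback config is expected to look roughly like:
  # {
  #   "cniVersion": "1.0.0",
  #   "name": "loopback",
  #   "type": "loopback"
  # }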
I0317 13:51:30.381530 1324214 start.go:495] detecting cgroup driver to use...
I0317 13:51:30.381562 1324214 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0317 13:51:30.381660 1324214 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0317 13:51:30.398513 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0317 13:51:30.408654 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0317 13:51:30.418677 1324214 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0317 13:51:30.418738 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0317 13:51:30.429028 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0317 13:51:30.439223 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0317 13:51:30.449620 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0317 13:51:30.459668 1324214 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0317 13:51:30.469333 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0317 13:51:30.479954 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0317 13:51:30.490531 1324214 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
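[editor's note] The sed sequence above rewrites /etc/containerd/config.toml for the cgroupfs driver, the pause:3.10 sandbox image, the runc v2 runtime and the shared CNI conf dir. A quick way to confirm the result on the node (a sketch; expected values inferred from the commands above, not echoed in this log):

  # Check the values the sed commands are expected to have set
  grep -E 'SystemdCgroup|sandbox_image|conf_dir' /etc/containerd/config.toml
  # Expected (roughly):
  #   SystemdCgroup = false
  #   sandbox_image = "registry.k8s.io/pause:3.10"
  #   conf_dir = "/etc/cni/net.d"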
I0317 13:51:30.501148 1324214 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0317 13:51:30.510303 1324214 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0317 13:51:30.519365 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:30.613597 1324214 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0317 13:51:30.717924 1324214 start.go:495] detecting cgroup driver to use...
I0317 13:51:30.717971 1324214 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0317 13:51:30.718017 1324214 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0317 13:51:30.738767 1324214 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0317 13:51:30.738826 1324214 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0317 13:51:30.751495 1324214 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0317 13:51:30.769602 1324214 ssh_runner.go:195] Run: which cri-dockerd
I0317 13:51:30.778140 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0317 13:51:30.792169 1324214 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0317 13:51:30.821256 1324214 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0317 13:51:30.927974 1324214 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0317 13:51:31.030664 1324214 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0317 13:51:31.030776 1324214 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
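[editor's note] The 130-byte /etc/docker/daemon.json copied here is what switches dockerd to the cgroupfs driver; its literal contents are not printed in this log. A plausible minimal shape, labelled as an assumption, plus the reload/restart the log performs next:

  # Illustrative only: the real daemon.json from this run is not shown in the log
  cat /etc/docker/daemon.json
  # {
  #   "exec-opts": ["native.cgroupdriver=cgroupfs"],
  #   "log-driver": "json-file",
  #   "storage-driver": "overlay2"
  # }
  sudo systemctl daemon-reload && sudo systemctl restart docker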
I0317 13:51:31.050211 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:31.154094 1324214 ssh_runner.go:195] Run: sudo systemctl restart docker
I0317 13:51:31.445496 1324214 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0317 13:51:31.457399 1324214 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0317 13:51:31.469165 1324214 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0317 13:51:31.561952 1324214 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0317 13:51:31.645335 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:31.736617 1324214 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0317 13:51:31.750511 1324214 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0317 13:51:31.762062 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:31.852662 1324214 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0317 13:51:31.923255 1324214 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0317 13:51:31.923329 1324214 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0317 13:51:31.927189 1324214 start.go:563] Will wait 60s for crictl version
I0317 13:51:31.927257 1324214 ssh_runner.go:195] Run: which crictl
I0317 13:51:31.930792 1324214 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0317 13:51:31.971418 1324214 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.0.1
RuntimeApiVersion: v1
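[editor's note] The version probe above goes through crictl using the endpoint written to /etc/crictl.yaml earlier (unix:///var/run/cri-dockerd.sock). To reproduce it by hand on the node (a sketch):

  # Same query the test driver runs, with the endpoint made explicit
  sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version
  # The daemon's own view, as used on the next two log lines
  docker version --format '{{.Server.Version}}'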
I0317 13:51:31.971485 1324214 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0317 13:51:31.993482 1324214 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0317 13:51:32.020526 1324214 out.go:235] * Preparing Kubernetes v1.32.2 on Docker 28.0.1 ...
I0317 13:51:32.020654 1324214 cli_runner.go:164] Run: docker network inspect scheduled-stop-243584 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0317 13:51:32.037621 1324214 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I0317 13:51:32.041598 1324214 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0317 13:51:32.052925 1324214 kubeadm.go:883] updating cluster {Name:scheduled-stop-243584 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:scheduled-stop-243584 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0317 13:51:32.053022 1324214 preload.go:131] Checking if preload exists for k8s version v1.32.2 and runtime docker
I0317 13:51:32.053079 1324214 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0317 13:51:32.073679 1324214 docker.go:689] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.32.2
registry.k8s.io/kube-scheduler:v1.32.2
registry.k8s.io/kube-controller-manager:v1.32.2
registry.k8s.io/kube-proxy:v1.32.2
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0317 13:51:32.073692 1324214 docker.go:619] Images already preloaded, skipping extraction
I0317 13:51:32.073757 1324214 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0317 13:51:32.094443 1324214 docker.go:689] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.32.2
registry.k8s.io/kube-controller-manager:v1.32.2
registry.k8s.io/kube-scheduler:v1.32.2
registry.k8s.io/kube-proxy:v1.32.2
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0317 13:51:32.094458 1324214 cache_images.go:84] Images are preloaded, skipping loading
I0317 13:51:32.094466 1324214 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.32.2 docker true true} ...
I0317 13:51:32.094564 1324214 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-243584 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.32.2 ClusterName:scheduled-stop-243584 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
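[editor's note] The kubelet unit fragment above is installed as a systemd drop-in (the 10-kubeadm.conf scp'd a few lines below). To see how systemd merges it with the base kubelet.service on the node (a sketch):

  # Show the kubelet unit plus its drop-ins, including 10-kubeadm.conf
  sudo systemctl cat kubelet
  # The flags minikube injects end up in the merged ExecStart line, e.g.
  #   --hostname-override=scheduled-stop-243584 --node-ip=192.168.76.2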
I0317 13:51:32.094639 1324214 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0317 13:51:32.142792 1324214 cni.go:84] Creating CNI manager for ""
I0317 13:51:32.142808 1324214 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0317 13:51:32.142817 1324214 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0317 13:51:32.142835 1324214 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.32.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-243584 NodeName:scheduled-stop-243584 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0317 13:51:32.142974 1324214 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "scheduled-stop-243584"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.2
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
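[editor's note] This generated config is written to /var/tmp/minikube/kubeadm.yaml.new below and fed to kubeadm init. Once the control plane is up, the same settings can be read back from the cluster, since kubeadm stores them in ConfigMaps during the upload-config phase seen later in this log. A sketch:

  # Read the ClusterConfiguration kubeadm uploaded during init
  sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/etc/kubernetes/admin.conf \
    -n kube-system get configmap kubeadm-config -o yaml
  # The KubeletConfiguration lands in the kubelet-config ConfigMap
  sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/etc/kubernetes/admin.conf \
    -n kube-system get configmap kubelet-config -o yaml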
I0317 13:51:32.143035 1324214 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.2
I0317 13:51:32.151672 1324214 binaries.go:44] Found k8s binaries, skipping transfer
I0317 13:51:32.151736 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0317 13:51:32.160361 1324214 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes)
I0317 13:51:32.179410 1324214 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0317 13:51:32.196748 1324214 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2299 bytes)
I0317 13:51:32.213979 1324214 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0317 13:51:32.217396 1324214 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0317 13:51:32.228110 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:32.316662 1324214 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0317 13:51:32.336322 1324214 certs.go:68] Setting up /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584 for IP: 192.168.76.2
I0317 13:51:32.336344 1324214 certs.go:194] generating shared ca certs ...
I0317 13:51:32.336367 1324214 certs.go:226] acquiring lock for ca certs: {Name:mka2aadc5dbaa2e5043414215576d5f76d3f10d4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:32.336523 1324214 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.key
I0317 13:51:32.336564 1324214 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/proxy-client-ca.key
I0317 13:51:32.336570 1324214 certs.go:256] generating profile certs ...
I0317 13:51:32.336632 1324214 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.key
I0317 13:51:32.336641 1324214 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.crt with IP's: []
I0317 13:51:32.849118 1324214 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.crt ...
I0317 13:51:32.849134 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.crt: {Name:mkbf2ca1b10019250d7eeaddcc9c6c18f9e475c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:32.849341 1324214 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.key ...
I0317 13:51:32.849349 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/client.key: {Name:mk2b3394b22eed346a9291a687219a1ae46bd410 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:32.849454 1324214 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key.b467935d
I0317 13:51:32.849467 1324214 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt.b467935d with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0317 13:51:33.457781 1324214 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt.b467935d ...
I0317 13:51:33.457797 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt.b467935d: {Name:mk20c43b27a7877cc5918c8eba64d07380adb4c2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:33.457988 1324214 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key.b467935d ...
I0317 13:51:33.457996 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key.b467935d: {Name:mk7f366e33721b3533bf134e1069d21f787e7151 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:33.458087 1324214 certs.go:381] copying /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt.b467935d -> /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt
I0317 13:51:33.458164 1324214 certs.go:385] copying /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key.b467935d -> /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key
I0317 13:51:33.458218 1324214 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.key
I0317 13:51:33.458229 1324214 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.crt with IP's: []
I0317 13:51:34.497070 1324214 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.crt ...
I0317 13:51:34.497086 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.crt: {Name:mk0827558a7eca62660222e8c745162089a25ff6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:34.497279 1324214 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.key ...
I0317 13:51:34.497288 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.key: {Name:mk0c81c8e1b5740582737fbd02a324c4dcb9d86b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:34.497472 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/1120731.pem (1338 bytes)
W0317 13:51:34.497515 1324214 certs.go:480] ignoring /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/1120731_empty.pem, impossibly tiny 0 bytes
I0317 13:51:34.497522 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca-key.pem (1675 bytes)
I0317 13:51:34.497545 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/ca.pem (1082 bytes)
I0317 13:51:34.497565 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/cert.pem (1123 bytes)
I0317 13:51:34.497588 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/key.pem (1675 bytes)
I0317 13:51:34.497627 1324214 certs.go:484] found cert: /home/jenkins/minikube-integration/20539-1115410/.minikube/files/etc/ssl/certs/11207312.pem (1708 bytes)
I0317 13:51:34.498194 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0317 13:51:34.528846 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0317 13:51:34.556332 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0317 13:51:34.582782 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0317 13:51:34.607100 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I0317 13:51:34.632616 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0317 13:51:34.659222 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0317 13:51:34.684892 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/profiles/scheduled-stop-243584/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0317 13:51:34.709453 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0317 13:51:34.736915 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/certs/1120731.pem --> /usr/share/ca-certificates/1120731.pem (1338 bytes)
I0317 13:51:34.762267 1324214 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20539-1115410/.minikube/files/etc/ssl/certs/11207312.pem --> /usr/share/ca-certificates/11207312.pem (1708 bytes)
I0317 13:51:34.787339 1324214 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0317 13:51:34.805849 1324214 ssh_runner.go:195] Run: openssl version
I0317 13:51:34.811538 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0317 13:51:34.821384 1324214 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0317 13:51:34.824790 1324214 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Mar 17 13:15 /usr/share/ca-certificates/minikubeCA.pem
I0317 13:51:34.824842 1324214 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0317 13:51:34.831715 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0317 13:51:34.841105 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1120731.pem && ln -fs /usr/share/ca-certificates/1120731.pem /etc/ssl/certs/1120731.pem"
I0317 13:51:34.850674 1324214 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1120731.pem
I0317 13:51:34.854278 1324214 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Mar 17 13:22 /usr/share/ca-certificates/1120731.pem
I0317 13:51:34.854332 1324214 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1120731.pem
I0317 13:51:34.861470 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/1120731.pem /etc/ssl/certs/51391683.0"
I0317 13:51:34.871142 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/11207312.pem && ln -fs /usr/share/ca-certificates/11207312.pem /etc/ssl/certs/11207312.pem"
I0317 13:51:34.880691 1324214 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/11207312.pem
I0317 13:51:34.884052 1324214 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Mar 17 13:22 /usr/share/ca-certificates/11207312.pem
I0317 13:51:34.884106 1324214 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/11207312.pem
I0317 13:51:34.891159 1324214 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/11207312.pem /etc/ssl/certs/3ec20f2e.0"
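[editor's note] The pattern repeated three times above is how minikube installs extra CA certificates: copy the PEM into /usr/share/ca-certificates, symlink it into /etc/ssl/certs, compute its subject hash with openssl, and add a <hash>.0 symlink so OpenSSL's lookup-by-hash finds it. A condensed sketch for one file (extra-ca.pem is a hypothetical filename):

  # Hypothetical CA file, mirroring the steps in the log
  sudo ln -fs /usr/share/ca-certificates/extra-ca.pem /etc/ssl/certs/extra-ca.pem
  hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/extra-ca.pem)
  sudo ln -fs /etc/ssl/certs/extra-ca.pem "/etc/ssl/certs/${hash}.0"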
I0317 13:51:34.900637 1324214 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0317 13:51:34.903934 1324214 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0317 13:51:34.903977 1324214 kubeadm.go:392] StartCluster: {Name:scheduled-stop-243584 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1741860993-20523@sha256:cd976907fa4d517c84fff1e5ef773b9fb3c738c4e1ded824ea5133470a66e185 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.2 ClusterName:scheduled-stop-243584 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0317 13:51:34.904091 1324214 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0317 13:51:34.922030 1324214 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0317 13:51:34.930991 1324214 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0317 13:51:34.939482 1324214 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0317 13:51:34.939533 1324214 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0317 13:51:34.948095 1324214 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0317 13:51:34.948105 1324214 kubeadm.go:157] found existing configuration files:
I0317 13:51:34.948161 1324214 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0317 13:51:34.956645 1324214 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0317 13:51:34.956705 1324214 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0317 13:51:34.965156 1324214 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0317 13:51:34.974062 1324214 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0317 13:51:34.974135 1324214 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0317 13:51:34.982816 1324214 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0317 13:51:34.991898 1324214 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0317 13:51:34.991953 1324214 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0317 13:51:35.001929 1324214 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0317 13:51:35.014325 1324214 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0317 13:51:35.014399 1324214 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0317 13:51:35.023367 1324214 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.32.2:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0317 13:51:35.062377 1324214 kubeadm.go:310] [init] Using Kubernetes version: v1.32.2
I0317 13:51:35.062459 1324214 kubeadm.go:310] [preflight] Running pre-flight checks
I0317 13:51:35.084822 1324214 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0317 13:51:35.084887 1324214 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1077-aws
I0317 13:51:35.084921 1324214 kubeadm.go:310] OS: Linux
I0317 13:51:35.084965 1324214 kubeadm.go:310] CGROUPS_CPU: enabled
I0317 13:51:35.085012 1324214 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0317 13:51:35.085073 1324214 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0317 13:51:35.085119 1324214 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0317 13:51:35.085165 1324214 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0317 13:51:35.085213 1324214 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0317 13:51:35.085256 1324214 kubeadm.go:310] CGROUPS_PIDS: enabled
I0317 13:51:35.085303 1324214 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0317 13:51:35.085348 1324214 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0317 13:51:35.145583 1324214 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0317 13:51:35.145697 1324214 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0317 13:51:35.145804 1324214 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0317 13:51:35.160380 1324214 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0317 13:51:35.167031 1324214 out.go:235] - Generating certificates and keys ...
I0317 13:51:35.167159 1324214 kubeadm.go:310] [certs] Using existing ca certificate authority
I0317 13:51:35.167239 1324214 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0317 13:51:35.554429 1324214 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0317 13:51:36.128450 1324214 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0317 13:51:36.314424 1324214 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0317 13:51:36.918377 1324214 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0317 13:51:37.695880 1324214 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0317 13:51:37.696247 1324214 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-243584] and IPs [192.168.76.2 127.0.0.1 ::1]
I0317 13:51:38.271257 1324214 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0317 13:51:38.271406 1324214 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-243584] and IPs [192.168.76.2 127.0.0.1 ::1]
I0317 13:51:39.036718 1324214 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0317 13:51:39.256822 1324214 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0317 13:51:39.678292 1324214 kubeadm.go:310] [certs] Generating "sa" key and public key
I0317 13:51:39.678516 1324214 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0317 13:51:40.392366 1324214 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0317 13:51:40.788525 1324214 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0317 13:51:41.033424 1324214 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0317 13:51:41.507347 1324214 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0317 13:51:41.925392 1324214 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0317 13:51:41.926134 1324214 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0317 13:51:41.929088 1324214 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0317 13:51:41.932757 1324214 out.go:235] - Booting up control plane ...
I0317 13:51:41.932873 1324214 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0317 13:51:41.932951 1324214 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0317 13:51:41.933020 1324214 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0317 13:51:41.958526 1324214 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0317 13:51:41.965886 1324214 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0317 13:51:41.965933 1324214 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0317 13:51:42.087287 1324214 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0317 13:51:42.087401 1324214 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0317 13:51:43.582129 1324214 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.501652099s
I0317 13:51:43.582209 1324214 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0317 13:51:50.583775 1324214 kubeadm.go:310] [api-check] The API server is healthy after 7.001817229s
I0317 13:51:50.606333 1324214 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0317 13:51:50.622604 1324214 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0317 13:51:50.655318 1324214 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0317 13:51:50.655513 1324214 kubeadm.go:310] [mark-control-plane] Marking the node scheduled-stop-243584 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0317 13:51:50.667246 1324214 kubeadm.go:310] [bootstrap-token] Using token: fmrs1x.hfr7ixeahmehc3u1
I0317 13:51:50.670114 1324214 out.go:235] - Configuring RBAC rules ...
I0317 13:51:50.670262 1324214 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0317 13:51:50.674808 1324214 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0317 13:51:50.683043 1324214 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0317 13:51:50.689033 1324214 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0317 13:51:50.695900 1324214 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0317 13:51:50.699847 1324214 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0317 13:51:50.990798 1324214 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0317 13:51:51.424043 1324214 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0317 13:51:51.992695 1324214 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0317 13:51:51.994146 1324214 kubeadm.go:310]
I0317 13:51:51.994226 1324214 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0317 13:51:51.994230 1324214 kubeadm.go:310]
I0317 13:51:51.994305 1324214 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0317 13:51:51.994308 1324214 kubeadm.go:310]
I0317 13:51:51.994333 1324214 kubeadm.go:310] mkdir -p $HOME/.kube
I0317 13:51:51.994390 1324214 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0317 13:51:51.994439 1324214 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0317 13:51:51.994442 1324214 kubeadm.go:310]
I0317 13:51:51.994498 1324214 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0317 13:51:51.994501 1324214 kubeadm.go:310]
I0317 13:51:51.994548 1324214 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0317 13:51:51.994553 1324214 kubeadm.go:310]
I0317 13:51:51.994603 1324214 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0317 13:51:51.994690 1324214 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0317 13:51:51.994757 1324214 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0317 13:51:51.994760 1324214 kubeadm.go:310]
I0317 13:51:51.994850 1324214 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0317 13:51:51.994925 1324214 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0317 13:51:51.994928 1324214 kubeadm.go:310]
I0317 13:51:51.995010 1324214 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token fmrs1x.hfr7ixeahmehc3u1 \
I0317 13:51:51.995111 1324214 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:af4d80408e90de1964e97f63a0ed1bbfbf4eafdde4782d2526b5a4753a70e86a \
I0317 13:51:51.995131 1324214 kubeadm.go:310] --control-plane
I0317 13:51:51.995134 1324214 kubeadm.go:310]
I0317 13:51:51.995217 1324214 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0317 13:51:51.995221 1324214 kubeadm.go:310]
I0317 13:51:51.995301 1324214 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token fmrs1x.hfr7ixeahmehc3u1 \
I0317 13:51:51.995401 1324214 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:af4d80408e90de1964e97f63a0ed1bbfbf4eafdde4782d2526b5a4753a70e86a
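[editor's note] The join commands printed here embed a bootstrap token with a 24h TTL (ttl: 24h0m0s in the generated config above). If it has expired by the time another node needs to join, a fresh one can be minted on the control plane; a sketch:

  # Print a ready-to-run worker join command with a new token
  sudo kubeadm token create --print-join-command
  # List existing bootstrap tokens and their expiry
  sudo kubeadm token list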
I0317 13:51:52.002051 1324214 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0317 13:51:52.002280 1324214 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1077-aws\n", err: exit status 1
I0317 13:51:52.002488 1324214 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0317 13:51:52.002514 1324214 cni.go:84] Creating CNI manager for ""
I0317 13:51:52.002528 1324214 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0317 13:51:52.005902 1324214 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0317 13:51:52.008706 1324214 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0317 13:51:52.026077 1324214 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
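[editor's note] The 496-byte /etc/cni/net.d/1-k8s.conflist written here is minikube's bridge CNI chain for the 10.244.0.0/16 pod CIDR chosen earlier. Its literal contents are not echoed in this log; the commented JSON below is a representative bridge+portmap conflist, an assumption rather than the real file:

  # Illustrative shape only; the literal 1-k8s.conflist is not shown in this log
  cat /etc/cni/net.d/1-k8s.conflist
  # {
  #   "cniVersion": "1.0.0",
  #   "name": "bridge",
  #   "plugins": [
  #     { "type": "bridge",
  #       "isDefaultGateway": true,
  #       "ipMasq": true,
  #       "hairpinMode": true,
  #       "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" } },
  #     { "type": "portmap", "capabilities": { "portMappings": true } }
  #   ]
  # }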
I0317 13:51:52.047041 1324214 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0317 13:51:52.047155 1324214 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.2/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0317 13:51:52.047237 1324214 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-243584 minikube.k8s.io/updated_at=2025_03_17T13_51_52_0700 minikube.k8s.io/version=v1.35.0 minikube.k8s.io/commit=5a6f3d20e78a9ae03fc65e3f2e727d0ae0107b3c minikube.k8s.io/name=scheduled-stop-243584 minikube.k8s.io/primary=true
I0317 13:51:52.205444 1324214 ops.go:34] apiserver oom_adj: -16
I0317 13:51:52.205471 1324214 kubeadm.go:1113] duration metric: took 158.3583ms to wait for elevateKubeSystemPrivileges
I0317 13:51:52.205490 1324214 kubeadm.go:394] duration metric: took 17.301517013s to StartCluster
I0317 13:51:52.205506 1324214 settings.go:142] acquiring lock: {Name:mke49e242edc3285f205f5787b107a2dac6376eb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:52.205579 1324214 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20539-1115410/kubeconfig
I0317 13:51:52.206210 1324214 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20539-1115410/kubeconfig: {Name:mkd7b3f1599a993f1ecc89c150f1c90959e7d444 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0317 13:51:52.206413 1324214 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0317 13:51:52.206432 1324214 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.2 ContainerRuntime:docker ControlPlane:true Worker:true}
I0317 13:51:52.206667 1324214 config.go:182] Loaded profile config "scheduled-stop-243584": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.2
I0317 13:51:52.206708 1324214 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0317 13:51:52.206772 1324214 addons.go:69] Setting storage-provisioner=true in profile "scheduled-stop-243584"
I0317 13:51:52.206783 1324214 addons.go:238] Setting addon storage-provisioner=true in "scheduled-stop-243584"
I0317 13:51:52.206805 1324214 host.go:66] Checking if "scheduled-stop-243584" exists ...
I0317 13:51:52.207265 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:52.207898 1324214 addons.go:69] Setting default-storageclass=true in profile "scheduled-stop-243584"
I0317 13:51:52.207912 1324214 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-243584"
I0317 13:51:52.208187 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:52.210482 1324214 out.go:177] * Verifying Kubernetes components...
I0317 13:51:52.213528 1324214 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0317 13:51:52.251152 1324214 addons.go:238] Setting addon default-storageclass=true in "scheduled-stop-243584"
I0317 13:51:52.251180 1324214 host.go:66] Checking if "scheduled-stop-243584" exists ...
I0317 13:51:52.251600 1324214 cli_runner.go:164] Run: docker container inspect scheduled-stop-243584 --format={{.State.Status}}
I0317 13:51:52.251754 1324214 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0317 13:51:52.254756 1324214 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0317 13:51:52.254767 1324214 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0317 13:51:52.254917 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:52.293671 1324214 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0317 13:51:52.293696 1324214 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0317 13:51:52.293768 1324214 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-243584
I0317 13:51:52.299009 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:52.336933 1324214 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33941 SSHKeyPath:/home/jenkins/minikube-integration/20539-1115410/.minikube/machines/scheduled-stop-243584/id_rsa Username:docker}
I0317 13:51:52.537701 1324214 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0317 13:51:52.542705 1324214 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0317 13:51:52.542846 1324214 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0317 13:51:52.599630 1324214 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0317 13:51:53.128443 1324214 api_server.go:52] waiting for apiserver process to appear ...
I0317 13:51:53.128494 1324214 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0317 13:51:53.128577 1324214 start.go:971] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I0317 13:51:53.155238 1324214 api_server.go:72] duration metric: took 948.781109ms to wait for apiserver process to appear ...
I0317 13:51:53.155250 1324214 api_server.go:88] waiting for apiserver healthz status ...
I0317 13:51:53.155300 1324214 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I0317 13:51:53.168942 1324214 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I0317 13:51:53.170158 1324214 api_server.go:141] control plane version: v1.32.2
I0317 13:51:53.170174 1324214 api_server.go:131] duration metric: took 14.907174ms to wait for apiserver health ...
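[editor's note] The health probe above hits /healthz on the apiserver directly. The same check can be run by hand with the kubeconfig minikube writes on the node (a sketch, using the binary path seen elsewhere in this log):

  # Ask the apiserver for its health endpoint through the authenticated client
  sudo /var/lib/minikube/binaries/v1.32.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig get --raw /healthz
  # Expected output: ok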
I0317 13:51:53.170181 1324214 system_pods.go:43] waiting for kube-system pods to appear ...
I0317 13:51:53.177719 1324214 system_pods.go:59] 5 kube-system pods found
I0317 13:51:53.177778 1324214 system_pods.go:61] "etcd-scheduled-stop-243584" [7ee18146-7cb0-4e3b-8eb9-c42c4c205ea3] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0317 13:51:53.177786 1324214 system_pods.go:61] "kube-apiserver-scheduled-stop-243584" [03f29911-b26a-4d65-a5bf-6ae8a7888242] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0317 13:51:53.177796 1324214 system_pods.go:61] "kube-controller-manager-scheduled-stop-243584" [0ab2542e-9a6f-4666-b4e7-11df1dc4407c] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0317 13:51:53.177810 1324214 system_pods.go:61] "kube-scheduler-scheduled-stop-243584" [be96d5fe-6afd-4d20-8fd3-28ca64f420d1] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0317 13:51:53.177815 1324214 system_pods.go:61] "storage-provisioner" [4ad6664f-e005-48c7-a2ad-08c65444f76e] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0317 13:51:53.177819 1324214 system_pods.go:74] duration metric: took 7.634354ms to wait for pod list to return data ...
I0317 13:51:53.177829 1324214 kubeadm.go:582] duration metric: took 971.3773ms to wait for: map[apiserver:true system_pods:true]
I0317 13:51:53.177840 1324214 node_conditions.go:102] verifying NodePressure condition ...
I0317 13:51:53.179515 1324214 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0317 13:51:53.180719 1324214 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0317 13:51:53.180737 1324214 node_conditions.go:123] node cpu capacity is 2
I0317 13:51:53.180747 1324214 node_conditions.go:105] duration metric: took 2.90347ms to run NodePressure ...
I0317 13:51:53.180758 1324214 start.go:241] waiting for startup goroutines ...
I0317 13:51:53.182690 1324214 addons.go:514] duration metric: took 975.979019ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0317 13:51:53.632541 1324214 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-243584" context rescaled to 1 replicas
I0317 13:51:53.632571 1324214 start.go:246] waiting for cluster config update ...
I0317 13:51:53.632583 1324214 start.go:255] writing updated cluster config ...
I0317 13:51:53.632874 1324214 ssh_runner.go:195] Run: rm -f paused
I0317 13:51:53.703919 1324214 start.go:600] kubectl: 1.32.3, cluster: 1.32.2 (minor skew: 0)
I0317 13:51:53.707274 1324214 out.go:177] * Done! kubectl is now configured to use "scheduled-stop-243584" cluster and "default" namespace by default
==> Docker <==
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.240629990Z" level=info msg="Loading containers: start."
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.409818172Z" level=info msg="Loading containers: done."
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.419948555Z" level=info msg="Docker daemon" commit=bbd0a17 containerd-snapshotter=false storage-driver=overlay2 version=28.0.1
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.420034216Z" level=info msg="Initializing buildkit"
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.434828333Z" level=info msg="Completed buildkit initialization"
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.443140739Z" level=info msg="Daemon has completed initialization"
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.443354711Z" level=info msg="API listen on /var/run/docker.sock"
Mar 17 13:51:31 scheduled-stop-243584 dockerd[1348]: time="2025-03-17T13:51:31.443540499Z" level=info msg="API listen on [::]:2376"
Mar 17 13:51:31 scheduled-stop-243584 systemd[1]: Started Docker Application Container Engine.
Mar 17 13:51:31 scheduled-stop-243584 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Start docker client with request timeout 0s"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Hairpin mode is set to hairpin-veth"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Loaded network plugin cni"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Docker cri networking managed by network plugin cni"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Setting cgroupDriver cgroupfs"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Mar 17 13:51:31 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:31Z" level=info msg="Start cri-dockerd grpc backend"
Mar 17 13:51:31 scheduled-stop-243584 systemd[1]: Started CRI Interface for Docker Application Container Engine.
Mar 17 13:51:43 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:43Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9fbeab80d02e830c7ddc50c52bdd2f90d7921efa2197f0911974db02a6c8dee0/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Mar 17 13:51:44 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:44Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/07a0a7263d87c5344a1c9157ef02dafb0bebfc4cd942663552904532e851f9bd/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Mar 17 13:51:44 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:44Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0bea48ed5d84b8c53a7df3fed3a489ab052f44df7c7626fce1a18ff5869b9a6c/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Mar 17 13:51:44 scheduled-stop-243584 cri-dockerd[1629]: time="2025-03-17T13:51:44Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/37a821cf2cac29e435c7c4e9d292af7e2101ea42103fe66d3a2af681a79116d2/resolv.conf as [nameserver 192.168.76.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
==> container status <==
CONTAINER        IMAGE            CREATED           STATE      NAME                       ATTEMPT    POD ID           POD
81d46e6721573    7fc9d4aa817aa    11 seconds ago    Running    etcd                       0          37a821cf2cac2    etcd-scheduled-stop-243584
3ad036ce2cfb7    82dfa03f692fb    11 seconds ago    Running    kube-scheduler             0          0bea48ed5d84b    kube-scheduler-scheduled-stop-243584
a2e69c3351900    3c9285acfd2ff    11 seconds ago    Running    kube-controller-manager    0          07a0a7263d87c    kube-controller-manager-scheduled-stop-243584
a926595c50419    6417e1437b6d9    12 seconds ago    Running    kube-apiserver             0          9fbeab80d02e8    kube-apiserver-scheduled-stop-243584
==> describe nodes <==
Name: scheduled-stop-243584
Roles: control-plane
Labels:             beta.kubernetes.io/arch=arm64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=arm64
                    kubernetes.io/hostname=scheduled-stop-243584
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=5a6f3d20e78a9ae03fc65e3f2e727d0ae0107b3c
                    minikube.k8s.io/name=scheduled-stop-243584
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_03_17T13_51_52_0700
                    minikube.k8s.io/version=v1.35.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 17 Mar 2025 13:51:48 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: scheduled-stop-243584
AcquireTime: <unset>
RenewTime: Mon, 17 Mar 2025 13:51:51 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Mon, 17 Mar 2025 13:51:48 +0000   Mon, 17 Mar 2025 13:51:44 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Mon, 17 Mar 2025 13:51:48 +0000   Mon, 17 Mar 2025 13:51:44 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Mon, 17 Mar 2025 13:51:48 +0000   Mon, 17 Mar 2025 13:51:44 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Mon, 17 Mar 2025 13:51:48 +0000   Mon, 17 Mar 2025 13:51:48 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: scheduled-stop-243584
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
System Info:
Machine ID: 84e8daba3db2458990b6f8d5f3e476ab
System UUID: f148d108-2187-4838-8f29-d6fb2835fd93
Boot ID: 181457f8-a248-4acf-a09f-ef4fd7d5bbae
Kernel Version: 5.15.0-1077-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://28.0.1
Kubelet Version: v1.32.2
Kube-Proxy Version: v1.32.2
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (5 in total)
Namespace      Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------      ----                                              ------------   ----------   ---------------   -------------   ---
kube-system    etcd-scheduled-stop-243584                        100m (5%)      0 (0%)       100Mi (1%)        0 (0%)          6s
kube-system    kube-apiserver-scheduled-stop-243584              250m (12%)     0 (0%)       0 (0%)            0 (0%)          4s
kube-system    kube-controller-manager-scheduled-stop-243584     200m (10%)     0 (0%)       0 (0%)            0 (0%)          5s
kube-system    kube-scheduler-scheduled-stop-243584              100m (5%)      0 (0%)       0 (0%)            0 (0%)          4s
kube-system    storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          2s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 650m (32%)   0 (0%)
memory              100Mi (1%)   0 (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
hugepages-32Mi      0 (0%)       0 (0%)
hugepages-64Ki      0 (0%)       0 (0%)
Events:
Type      Reason                    Age   From              Message
----      ------                    ----  ----              -------
Normal    Starting                  4s    kubelet           Starting kubelet.
Warning   CgroupV1                  4s    kubelet           cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal    NodeAllocatableEnforced   4s    kubelet           Updated Node Allocatable limit across pods
Normal    NodeHasSufficientMemory   4s    kubelet           Node scheduled-stop-243584 status is now: NodeHasSufficientMemory
Normal    NodeHasNoDiskPressure     4s    kubelet           Node scheduled-stop-243584 status is now: NodeHasNoDiskPressure
Normal    NodeHasSufficientPID      4s    kubelet           Node scheduled-stop-243584 status is now: NodeHasSufficientPID
Normal    RegisteredNode            0s    node-controller   Node scheduled-stop-243584 event: Registered Node scheduled-stop-243584 in Controller
==> dmesg <==
==> etcd [81d46e672157] <==
{"level":"info","ts":"2025-03-17T13:51:44.714837Z","caller":"embed/etcd.go:871","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-03-17T13:51:44.715128Z","caller":"embed/etcd.go:600","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-03-17T13:51:44.715289Z","caller":"embed/etcd.go:572","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-03-17T13:51:44.699604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-03-17T13:51:44.718372Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-03-17T13:51:45.661457Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-03-17T13:51:45.661513Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-03-17T13:51:45.661531Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-03-17T13:51:45.661548Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-03-17T13:51:45.661556Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-03-17T13:51:45.661567Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-03-17T13:51:45.661575Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-03-17T13:51:45.665422Z","caller":"etcdserver/server.go:2140","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:scheduled-stop-243584 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-03-17T13:51:45.665605Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-03-17T13:51:45.665756Z","caller":"etcdserver/server.go:2651","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-03-17T13:51:45.665605Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-03-17T13:51:45.668663Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-03-17T13:51:45.674899Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-03-17T13:51:45.675110Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-03-17T13:51:45.675282Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-03-17T13:51:45.675460Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-03-17T13:51:45.675613Z","caller":"etcdserver/server.go:2675","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-03-17T13:51:45.675925Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-03-17T13:51:45.676478Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-03-17T13:51:45.687750Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
==> kernel <==
13:51:55 up 9:34, 0 users, load average: 3.54, 2.49, 2.61
Linux scheduled-stop-243584 5.15.0-1077-aws #84~20.04.1-Ubuntu SMP Mon Jan 20 22:14:27 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [a926595c5041] <==
I0317 13:51:48.552741 1 aggregator.go:171] initial CRD sync complete...
I0317 13:51:48.552835 1 autoregister_controller.go:144] Starting autoregister controller
I0317 13:51:48.552942 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0317 13:51:48.553080 1 cache.go:39] Caches are synced for autoregister controller
I0317 13:51:48.588016 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0317 13:51:48.590943 1 shared_informer.go:320] Caches are synced for node_authorizer
I0317 13:51:48.590989 1 shared_informer.go:320] Caches are synced for configmaps
I0317 13:51:48.591179 1 cache.go:39] Caches are synced for LocalAvailability controller
I0317 13:51:48.591413 1 cache.go:39] Caches are synced for RemoteAvailability controller
I0317 13:51:48.591690 1 apf_controller.go:382] Running API Priority and Fairness config worker
I0317 13:51:48.591752 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I0317 13:51:48.591906 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I0317 13:51:49.388409 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0317 13:51:49.396060 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0317 13:51:49.396088 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0317 13:51:50.088479 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0317 13:51:50.146905 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0317 13:51:50.211425 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0317 13:51:50.219335 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I0317 13:51:50.220784 1 controller.go:615] quota admission added evaluator for: endpoints
I0317 13:51:50.225603 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0317 13:51:50.517342 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0317 13:51:51.402324 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0317 13:51:51.422603 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0317 13:51:51.432923 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
==> kube-controller-manager [a2e69c335190] <==
I0317 13:51:55.085953 1 shared_informer.go:320] Caches are synced for resource quota
I0317 13:51:55.091032 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status
I0317 13:51:55.101976 1 shared_informer.go:320] Caches are synced for garbage collector
I0317 13:51:55.114774 1 shared_informer.go:320] Caches are synced for disruption
I0317 13:51:55.114983 1 shared_informer.go:320] Caches are synced for HPA
I0317 13:51:55.114918 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0317 13:51:55.114933 1 shared_informer.go:320] Caches are synced for taint
I0317 13:51:55.115615 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0317 13:51:55.115810 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="scheduled-stop-243584"
I0317 13:51:55.116437 1 node_lifecycle_controller.go:1080] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I0317 13:51:55.115995 1 shared_informer.go:320] Caches are synced for expand
I0317 13:51:55.116692 1 shared_informer.go:320] Caches are synced for persistent volume
I0317 13:51:55.116088 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner
I0317 13:51:55.122893 1 shared_informer.go:320] Caches are synced for ReplicaSet
I0317 13:51:55.116208 1 shared_informer.go:320] Caches are synced for service account
I0317 13:51:55.117070 1 shared_informer.go:320] Caches are synced for endpoint
I0317 13:51:55.117083 1 shared_informer.go:320] Caches are synced for daemon sets
I0317 13:51:55.126148 1 shared_informer.go:320] Caches are synced for cronjob
I0317 13:51:55.126239 1 shared_informer.go:320] Caches are synced for job
I0317 13:51:55.126281 1 shared_informer.go:320] Caches are synced for endpoint_slice
I0317 13:51:55.126319 1 shared_informer.go:320] Caches are synced for crt configmap
I0317 13:51:55.134566 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="scheduled-stop-243584" podCIDRs=["10.244.0.0/24"]
I0317 13:51:55.134674 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="scheduled-stop-243584"
I0317 13:51:55.134748 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="scheduled-stop-243584"
I0317 13:51:55.149419 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="scheduled-stop-243584"
==> kube-scheduler [3ad036ce2cfb] <==
W0317 13:51:48.559503 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0317 13:51:48.561151 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0317 13:51:48.559554 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0317 13:51:48.561322 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:48.559614 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0317 13:51:48.561495 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:48.559685 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0317 13:51:48.561855 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0317 13:51:48.559721 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0317 13:51:48.562162 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.454386 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0317 13:51:49.454439 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.491751 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0317 13:51:49.491831 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.532936 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0317 13:51:49.533204 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.621174 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0317 13:51:49.621221 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.718322 1 reflector.go:569] runtime/asm_arm64.s:1223: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0317 13:51:49.718589 1 reflector.go:166] "Unhandled Error" err="runtime/asm_arm64.s:1223: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0317 13:51:49.749468 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0317 13:51:49.749516 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0317 13:51:49.754023 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0317 13:51:49.754070 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
I0317 13:51:52.349519 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721812 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/f99fcd21e2352b3194e2f689a2b0ab77-usr-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-243584\" (UID: \"f99fcd21e2352b3194e2f689a2b0ab77\") " pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721836 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/674948943cebe292401ee019a1dfbf65-ca-certs\") pod \"kube-controller-manager-scheduled-stop-243584\" (UID: \"674948943cebe292401ee019a1dfbf65\") " pod="kube-system/kube-controller-manager-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721855 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/674948943cebe292401ee019a1dfbf65-usr-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-243584\" (UID: \"674948943cebe292401ee019a1dfbf65\") " pod="kube-system/kube-controller-manager-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721873 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/ec2d1be6b05e32dd0ed64c110d469297-etcd-certs\") pod \"etcd-scheduled-stop-243584\" (UID: \"ec2d1be6b05e32dd0ed64c110d469297\") " pod="kube-system/etcd-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721892 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/f99fcd21e2352b3194e2f689a2b0ab77-ca-certs\") pod \"kube-apiserver-scheduled-stop-243584\" (UID: \"f99fcd21e2352b3194e2f689a2b0ab77\") " pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721910 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/f99fcd21e2352b3194e2f689a2b0ab77-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-243584\" (UID: \"f99fcd21e2352b3194e2f689a2b0ab77\") " pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721931 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/f99fcd21e2352b3194e2f689a2b0ab77-k8s-certs\") pod \"kube-apiserver-scheduled-stop-243584\" (UID: \"f99fcd21e2352b3194e2f689a2b0ab77\") " pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:51 scheduled-stop-243584 kubelet[2457]: I0317 13:51:51.721956 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/674948943cebe292401ee019a1dfbf65-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-243584\" (UID: \"674948943cebe292401ee019a1dfbf65\") " pod="kube-system/kube-controller-manager-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.319355 2457 apiserver.go:52] "Watching apiserver"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.403045 2457 kubelet.go:3200] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.406326 2457 kubelet.go:3200] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.406602 2457 kubelet.go:3200] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: E0317 13:51:52.417971 2457 kubelet.go:3202] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-243584\" already exists" pod="kube-system/etcd-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.420716 2457 desired_state_of_world_populator.go:157] "Finished populating initial desired state of world"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: E0317 13:51:52.421070 2457 kubelet.go:3202] "Failed creating a mirror pod" err="pods \"kube-scheduler-scheduled-stop-243584\" already exists" pod="kube-system/kube-scheduler-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: E0317 13:51:52.421908 2457 kubelet.go:3202] "Failed creating a mirror pod" err="pods \"kube-apiserver-scheduled-stop-243584\" already exists" pod="kube-system/kube-apiserver-scheduled-stop-243584"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.457104 2457 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-243584" podStartSLOduration=3.457083252 podStartE2EDuration="3.457083252s" podCreationTimestamp="2025-03-17 13:51:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-03-17 13:51:52.443434714 +0000 UTC m=+1.253430860" watchObservedRunningTime="2025-03-17 13:51:52.457083252 +0000 UTC m=+1.267079390"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.457415 2457 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-243584" podStartSLOduration=1.457408075 podStartE2EDuration="1.457408075s" podCreationTimestamp="2025-03-17 13:51:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-03-17 13:51:52.454801572 +0000 UTC m=+1.264797710" watchObservedRunningTime="2025-03-17 13:51:52.457408075 +0000 UTC m=+1.267404213"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.481835 2457 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-243584" podStartSLOduration=2.481814604 podStartE2EDuration="2.481814604s" podCreationTimestamp="2025-03-17 13:51:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-03-17 13:51:52.469147395 +0000 UTC m=+1.279143541" watchObservedRunningTime="2025-03-17 13:51:52.481814604 +0000 UTC m=+1.291810742"
Mar 17 13:51:52 scheduled-stop-243584 kubelet[2457]: I0317 13:51:52.494948 2457 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-243584" podStartSLOduration=1.4949299759999999 podStartE2EDuration="1.494929976s" podCreationTimestamp="2025-03-17 13:51:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-03-17 13:51:52.48239125 +0000 UTC m=+1.292387388" watchObservedRunningTime="2025-03-17 13:51:52.494929976 +0000 UTC m=+1.304926122"
Mar 17 13:51:55 scheduled-stop-243584 kubelet[2457]: I0317 13:51:55.258939 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/4ad6664f-e005-48c7-a2ad-08c65444f76e-tmp\") pod \"storage-provisioner\" (UID: \"4ad6664f-e005-48c7-a2ad-08c65444f76e\") " pod="kube-system/storage-provisioner"
Mar 17 13:51:55 scheduled-stop-243584 kubelet[2457]: I0317 13:51:55.259003 2457 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9xrk\" (UniqueName: \"kubernetes.io/projected/4ad6664f-e005-48c7-a2ad-08c65444f76e-kube-api-access-v9xrk\") pod \"storage-provisioner\" (UID: \"4ad6664f-e005-48c7-a2ad-08c65444f76e\") " pod="kube-system/storage-provisioner"
Mar 17 13:51:55 scheduled-stop-243584 kubelet[2457]: E0317 13:51:55.371399 2457 projected.go:288] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Mar 17 13:51:55 scheduled-stop-243584 kubelet[2457]: E0317 13:51:55.371439 2457 projected.go:194] Error preparing data for projected volume kube-api-access-v9xrk for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Mar 17 13:51:55 scheduled-stop-243584 kubelet[2457]: E0317 13:51:55.371514 2457 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4ad6664f-e005-48c7-a2ad-08c65444f76e-kube-api-access-v9xrk podName:4ad6664f-e005-48c7-a2ad-08c65444f76e nodeName:}" failed. No retries permitted until 2025-03-17 13:51:55.871490223 +0000 UTC m=+4.681486361 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-v9xrk" (UniqueName: "kubernetes.io/projected/4ad6664f-e005-48c7-a2ad-08c65444f76e-kube-api-access-v9xrk") pod "storage-provisioner" (UID: "4ad6664f-e005-48c7-a2ad-08c65444f76e") : configmap "kube-root-ca.crt" not found
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p scheduled-stop-243584 -n scheduled-stop-243584
helpers_test.go:261: (dbg) Run: kubectl --context scheduled-stop-243584 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: coredns-668d6bf9bc-r49zz kube-proxy-94gxh storage-provisioner
helpers_test.go:274: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context scheduled-stop-243584 describe pod coredns-668d6bf9bc-r49zz kube-proxy-94gxh storage-provisioner
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context scheduled-stop-243584 describe pod coredns-668d6bf9bc-r49zz kube-proxy-94gxh storage-provisioner: exit status 1 (92.594352ms)
** stderr **
Error from server (NotFound): pods "coredns-668d6bf9bc-r49zz" not found
Error from server (NotFound): pods "kube-proxy-94gxh" not found
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:279: kubectl --context scheduled-stop-243584 describe pod coredns-668d6bf9bc-r49zz kube-proxy-94gxh storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "scheduled-stop-243584" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p scheduled-stop-243584
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-243584: (2.127815287s)
--- FAIL: TestScheduledStopUnix (36.97s)