=== RUN TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-arm64 start -p scheduled-stop-748426 --memory=2048 --driver=docker --container-runtime=containerd
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-748426 --memory=2048 --driver=docker --container-runtime=containerd: (29.439569625s)
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-748426 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-748426 -n scheduled-stop-748426
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-arm64 stop -p scheduled-stop-748426 --schedule 15s
scheduled_stop_test.go:169: signal error was: <nil>
scheduled_stop_test.go:98: process 1042156 running but should have been killed on reschedule of stop
panic.go:629: *** TestScheduledStopUnix FAILED at 2025-01-27 11:58:13.611502429 +0000 UTC m=+2107.021986795
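Editor's note: the failure above reduces to a process-liveness assertion. After the second --schedule invocation, the PID recorded for the first scheduled-stop daemon (1042156 in this run) should no longer exist, yet scheduled_stop_test.go:98 still found it running. Below is a minimal sketch of such a liveness probe on Unix, using the standard-library signal-0 idiom; it is illustrative only and not minikube's actual helper.

package main

import (
	"fmt"
	"os"
	"syscall"
)

// pidAlive reports whether a process with the given PID still exists.
// On Unix, os.FindProcess always succeeds, so the real check is sending
// signal 0, which performs the existence/permission test without
// delivering any signal.
func pidAlive(pid int) bool {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return false
	}
	return proc.Signal(syscall.Signal(0)) == nil
}

func main() {
	// PID taken from the failure message above; purely illustrative.
	if pidAlive(1042156) {
		fmt.Println("previous scheduled-stop daemon still running (this is what the test flags)")
	} else {
		fmt.Println("previous scheduled-stop daemon is gone, as the test expects")
	}
}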
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect scheduled-stop-748426
helpers_test.go:235: (dbg) docker inspect scheduled-stop-748426:
-- stdout --
[
{
"Id": "ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4",
"Created": "2025-01-27T11:57:49.188239919Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1040227,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-01-27T11:57:49.354081947Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:0434cf58b6dbace281e5de753aa4b2e3fe33dc9a3be53021531403743c3f155a",
"ResolvConfPath": "/var/lib/docker/containers/ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4/hostname",
"HostsPath": "/var/lib/docker/containers/ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4/hosts",
"LogPath": "/var/lib/docker/containers/ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4/ed0ae98c2aeafbc5b94192af7883039095182b47faeb933e7e419f00d97b51a4-json.log",
"Name": "/scheduled-stop-748426",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"scheduled-stop-748426:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "scheduled-stop-748426",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2147483648,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4294967296,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/b2a84234e5ee9219c2baaecfaba5525bdd39e3be01922f0abd14ac9eb4621710-init/diff:/var/lib/docker/overlay2/027cb12703497bfe682a04123361dc92cd40ae4c78d3ee9eafeedefee7ad1bd7/diff",
"MergedDir": "/var/lib/docker/overlay2/b2a84234e5ee9219c2baaecfaba5525bdd39e3be01922f0abd14ac9eb4621710/merged",
"UpperDir": "/var/lib/docker/overlay2/b2a84234e5ee9219c2baaecfaba5525bdd39e3be01922f0abd14ac9eb4621710/diff",
"WorkDir": "/var/lib/docker/overlay2/b2a84234e5ee9219c2baaecfaba5525bdd39e3be01922f0abd14ac9eb4621710/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "scheduled-stop-748426",
"Source": "/var/lib/docker/volumes/scheduled-stop-748426/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "scheduled-stop-748426",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "scheduled-stop-748426",
"name.minikube.sigs.k8s.io": "scheduled-stop-748426",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "6efa4f5b5085c2936077b6b481cd705f32930ca048bf1377aebe24116698ed0a",
"SandboxKey": "/var/run/docker/netns/6efa4f5b5085",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33762"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33763"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33766"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33764"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33765"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"scheduled-stop-748426": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:4c:02",
"DriverOpts": null,
"NetworkID": "d8817b8dee70e7fa2c0909aa8a7c51b5b1873b5f56d022f3950bad798740718d",
"EndpointID": "65671326e221da3c624326bc090705f5670934ff6f269996f9a6e3df4f759268",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"scheduled-stop-748426",
"ed0ae98c2aea"
]
}
}
}
}
]
-- /stdout --
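Editor's note on the inspect output above: HostConfig.PortBindings requests each published port (22, 2376, 5000, 8443, 32443) on 127.0.0.1 with an empty HostPort, so Docker assigns ephemeral host ports, and the concrete assignments appear under NetworkSettings.Ports (22/tcp -> 33762, the port the SSH provisioning later in this log connects to). A small sketch, assuming the docker CLI is on PATH, that reads the mapped SSH port back using the same Go template the cli_runner.go lines below use:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same template as the cli_runner.go invocations further down in this log;
	// the container name is the profile from this run.
	const tmpl = `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`
	out, err := exec.Command("docker", "container", "inspect", "-f", tmpl, "scheduled-stop-748426").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	fmt.Println("host port forwarded to container port 22:", strings.TrimSpace(string(out)))
}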
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-748426 -n scheduled-stop-748426
helpers_test.go:244: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestScheduledStopUnix]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p scheduled-stop-748426 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p scheduled-stop-748426 logs -n 25: (1.256737711s)
helpers_test.go:252: TestScheduledStopUnix logs:
-- stdout --
==> Audit <==
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
| stop | -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:52 UTC | 27 Jan 25 11:52 UTC |
| start | -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:52 UTC | 27 Jan 25 11:53 UTC |
| | --wait=true -v=8 | | | | | |
| | --alsologtostderr | | | | | |
| node | list -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:53 UTC | |
| node | multinode-407627 node delete | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:53 UTC | 27 Jan 25 11:53 UTC |
| | m03 | | | | | |
| stop | multinode-407627 stop | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:53 UTC | 27 Jan 25 11:54 UTC |
| start | -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:54 UTC | 27 Jan 25 11:55 UTC |
| | --wait=true -v=8 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| node | list -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | |
| start | -p multinode-407627-m02 | multinode-407627-m02 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| start | -p multinode-407627-m03 | multinode-407627-m03 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | 27 Jan 25 11:55 UTC |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| node | add -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | |
| delete | -p multinode-407627-m03 | multinode-407627-m03 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | 27 Jan 25 11:55 UTC |
| delete | -p multinode-407627 | multinode-407627 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | 27 Jan 25 11:55 UTC |
| start | -p test-preload-010440 | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:55 UTC | 27 Jan 25 11:57 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr | | | | | |
| | --wait=true --preload=false | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| | --kubernetes-version=v1.24.4 | | | | | |
| image | test-preload-010440 image pull | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:57 UTC |
| | gcr.io/k8s-minikube/busybox | | | | | |
| stop | -p test-preload-010440 | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:57 UTC |
| start | -p test-preload-010440 | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:57 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| | --wait=true --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| image | test-preload-010440 image list | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:57 UTC |
| delete | -p test-preload-010440 | test-preload-010440 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:57 UTC |
| start | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:57 UTC | 27 Jan 25 11:58 UTC |
| | --memory=2048 --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-748426 | scheduled-stop-748426 | jenkins | v1.35.0 | 27 Jan 25 11:58 UTC | |
| | --schedule 15s | | | | | |
|---------|--------------------------------|-----------------------|---------|---------|---------------------|---------------------|
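Editor's note: the audit rows above record one start followed by three stop --schedule 5m calls and three stop --schedule 15s calls, each with no end time; the CLI returns immediately and the actual stop is left to a background daemon, and each new --schedule call is expected to replace the daemon from the previous one, which is exactly the expectation that failed at scheduled_stop_test.go:98. A sketch of the same sequence driven from Go, mirroring the "(dbg) Run:" lines in this log; the binary path and profile name are simply the ones from this run.

package main

import (
	"fmt"
	"os/exec"
)

// run shells out the same way the "(dbg) Run:" lines in this log do.
func run(args ...string) {
	out, err := exec.Command("out/minikube-linux-arm64", args...).CombinedOutput()
	fmt.Printf("$ minikube %v\nerr=%v\n%s\n", args, err, out)
}

func main() {
	const profile = "scheduled-stop-748426"
	run("stop", "-p", profile, "--schedule", "5m")                          // schedule a stop five minutes out; returns immediately
	run("status", "--format={{.TimeToStop}}", "-p", profile, "-n", profile) // a pending stop should be reported
	run("stop", "-p", profile, "--schedule", "15s")                         // reschedule; the daemon from the 5m schedule should be killed
}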
==> Last Start <==
Log file created at: 2025/01/27 11:57:43
Running on machine: ip-172-31-29-130
Binary: Built with gc go1.23.4 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0127 11:57:43.698034 1039736 out.go:345] Setting OutFile to fd 1 ...
I0127 11:57:43.698147 1039736 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0127 11:57:43.698151 1039736 out.go:358] Setting ErrFile to fd 2...
I0127 11:57:43.698155 1039736 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0127 11:57:43.698496 1039736 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20318-888339/.minikube/bin
I0127 11:57:43.698934 1039736 out.go:352] Setting JSON to false
I0127 11:57:43.700021 1039736 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":16809,"bootTime":1737962255,"procs":163,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1075-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I0127 11:57:43.700085 1039736 start.go:139] virtualization:
I0127 11:57:43.703664 1039736 out.go:177] * [scheduled-stop-748426] minikube v1.35.0 on Ubuntu 20.04 (arm64)
I0127 11:57:43.707511 1039736 out.go:177] - MINIKUBE_LOCATION=20318
I0127 11:57:43.707637 1039736 notify.go:220] Checking for updates...
I0127 11:57:43.713547 1039736 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0127 11:57:43.716344 1039736 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20318-888339/kubeconfig
I0127 11:57:43.719130 1039736 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20318-888339/.minikube
I0127 11:57:43.721891 1039736 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0127 11:57:43.724584 1039736 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0127 11:57:43.727552 1039736 driver.go:394] Setting default libvirt URI to qemu:///system
I0127 11:57:43.766448 1039736 docker.go:123] docker version: linux-27.5.1:Docker Engine - Community
I0127 11:57:43.766562 1039736 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0127 11:57:43.822345 1039736 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-01-27 11:57:43.812602593 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1075-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.20.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.32.4]] Warnings:<nil>}}
I0127 11:57:43.822456 1039736 docker.go:318] overlay module found
I0127 11:57:43.825290 1039736 out.go:177] * Using the docker driver based on user configuration
I0127 11:57:43.828081 1039736 start.go:297] selected driver: docker
I0127 11:57:43.828089 1039736 start.go:901] validating driver "docker" against <nil>
I0127 11:57:43.828101 1039736 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0127 11:57:43.828886 1039736 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0127 11:57:43.879895 1039736 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:43 SystemTime:2025-01-27 11:57:43.870748795 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1075-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.20.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.32.4]] Warnings:<nil>}}
I0127 11:57:43.880126 1039736 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0127 11:57:43.880333 1039736 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
I0127 11:57:43.882976 1039736 out.go:177] * Using Docker driver with root privileges
I0127 11:57:43.885494 1039736 cni.go:84] Creating CNI manager for ""
I0127 11:57:43.885542 1039736 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0127 11:57:43.885550 1039736 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0127 11:57:43.885627 1039736 start.go:340] cluster config:
{Name:scheduled-stop-748426 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:scheduled-stop-748426 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:contain
erd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0127 11:57:43.890003 1039736 out.go:177] * Starting "scheduled-stop-748426" primary control-plane node in "scheduled-stop-748426" cluster
I0127 11:57:43.892650 1039736 cache.go:121] Beginning downloading kic base image for docker with containerd
I0127 11:57:43.895363 1039736 out.go:177] * Pulling base image v0.0.46 ...
I0127 11:57:43.897903 1039736 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime containerd
I0127 11:57:43.897944 1039736 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 in local docker daemon
I0127 11:57:43.897953 1039736 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20318-888339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-containerd-overlay2-arm64.tar.lz4
I0127 11:57:43.897961 1039736 cache.go:56] Caching tarball of preloaded images
I0127 11:57:43.898065 1039736 preload.go:172] Found /home/jenkins/minikube-integration/20318-888339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I0127 11:57:43.898075 1039736 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on containerd
I0127 11:57:43.898409 1039736 profile.go:143] Saving config to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/config.json ...
I0127 11:57:43.898435 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/config.json: {Name:mk218504d197a75327b80a789dc665c3a883c3cf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:43.916985 1039736 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 in local docker daemon, skipping pull
I0127 11:57:43.916996 1039736 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 exists in daemon, skipping load
I0127 11:57:43.917015 1039736 cache.go:227] Successfully downloaded all kic artifacts
I0127 11:57:43.917068 1039736 start.go:360] acquireMachinesLock for scheduled-stop-748426: {Name:mk92c1565d00f8c50b0d670a17ff2446ac388089 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0127 11:57:43.917185 1039736 start.go:364] duration metric: took 103.037µs to acquireMachinesLock for "scheduled-stop-748426"
I0127 11:57:43.917216 1039736 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-748426 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:scheduled-stop-748426 Namespace:default APIServerHAVIP: APIServerName:minikubeCA
APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHA
uthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0127 11:57:43.917279 1039736 start.go:125] createHost starting for "" (driver="docker")
I0127 11:57:43.920427 1039736 out.go:235] * Creating docker container (CPUs=2, Memory=2048MB) ...
I0127 11:57:43.920666 1039736 start.go:159] libmachine.API.Create for "scheduled-stop-748426" (driver="docker")
I0127 11:57:43.920702 1039736 client.go:168] LocalClient.Create starting
I0127 11:57:43.920768 1039736 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem
I0127 11:57:43.920799 1039736 main.go:141] libmachine: Decoding PEM data...
I0127 11:57:43.920815 1039736 main.go:141] libmachine: Parsing certificate...
I0127 11:57:43.920876 1039736 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20318-888339/.minikube/certs/cert.pem
I0127 11:57:43.920897 1039736 main.go:141] libmachine: Decoding PEM data...
I0127 11:57:43.920906 1039736 main.go:141] libmachine: Parsing certificate...
I0127 11:57:43.921306 1039736 cli_runner.go:164] Run: docker network inspect scheduled-stop-748426 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0127 11:57:43.937065 1039736 cli_runner.go:211] docker network inspect scheduled-stop-748426 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0127 11:57:43.937148 1039736 network_create.go:284] running [docker network inspect scheduled-stop-748426] to gather additional debugging logs...
I0127 11:57:43.937164 1039736 cli_runner.go:164] Run: docker network inspect scheduled-stop-748426
W0127 11:57:43.953021 1039736 cli_runner.go:211] docker network inspect scheduled-stop-748426 returned with exit code 1
I0127 11:57:43.953109 1039736 network_create.go:287] error running [docker network inspect scheduled-stop-748426]: docker network inspect scheduled-stop-748426: exit status 1
stdout:
[]
stderr:
Error response from daemon: network scheduled-stop-748426 not found
I0127 11:57:43.953121 1039736 network_create.go:289] output of [docker network inspect scheduled-stop-748426]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network scheduled-stop-748426 not found
** /stderr **
I0127 11:57:43.953220 1039736 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0127 11:57:43.970041 1039736 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-2217238752e2 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:92:9d:42:1b} reservation:<nil>}
I0127 11:57:43.970379 1039736 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-2670da9d45c0 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:02:42:22:55:9f:e0} reservation:<nil>}
I0127 11:57:43.970693 1039736 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-b00ab774f07e IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:42:4a:c8:c0:d6} reservation:<nil>}
I0127 11:57:43.971108 1039736 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001949600}
I0127 11:57:43.971124 1039736 network_create.go:124] attempt to create docker network scheduled-stop-748426 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0127 11:57:43.971178 1039736 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-748426 scheduled-stop-748426
I0127 11:57:44.051212 1039736 network_create.go:108] docker network scheduled-stop-748426 192.168.76.0/24 created
I0127 11:57:44.051236 1039736 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-748426" container
I0127 11:57:44.051330 1039736 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0127 11:57:44.067426 1039736 cli_runner.go:164] Run: docker volume create scheduled-stop-748426 --label name.minikube.sigs.k8s.io=scheduled-stop-748426 --label created_by.minikube.sigs.k8s.io=true
I0127 11:57:44.094303 1039736 oci.go:103] Successfully created a docker volume scheduled-stop-748426
I0127 11:57:44.094413 1039736 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-748426-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-748426 --entrypoint /usr/bin/test -v scheduled-stop-748426:/var gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -d /var/lib
I0127 11:57:44.670543 1039736 oci.go:107] Successfully prepared a docker volume scheduled-stop-748426
I0127 11:57:44.670583 1039736 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime containerd
I0127 11:57:44.670602 1039736 kic.go:194] Starting extracting preloaded images to volume ...
I0127 11:57:44.670668 1039736 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20318-888339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-748426:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -I lz4 -xf /preloaded.tar -C /extractDir
I0127 11:57:49.115797 1039736 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20318-888339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-748426:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -I lz4 -xf /preloaded.tar -C /extractDir: (4.445096218s)
I0127 11:57:49.115817 1039736 kic.go:203] duration metric: took 4.445212187s to extract preloaded images to volume ...
W0127 11:57:49.115961 1039736 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0127 11:57:49.116056 1039736 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0127 11:57:49.173963 1039736 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-748426 --name scheduled-stop-748426 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-748426 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-748426 --network scheduled-stop-748426 --ip 192.168.76.2 --volume scheduled-stop-748426:/var --security-opt apparmor=unconfined --memory=2048mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279
I0127 11:57:49.533343 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Running}}
I0127 11:57:49.555679 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:57:49.580083 1039736 cli_runner.go:164] Run: docker exec scheduled-stop-748426 stat /var/lib/dpkg/alternatives/iptables
I0127 11:57:49.629841 1039736 oci.go:144] the created container "scheduled-stop-748426" has a running status.
I0127 11:57:49.629861 1039736 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa...
I0127 11:57:50.740310 1039736 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0127 11:57:50.766533 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:57:50.786066 1039736 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0127 11:57:50.786078 1039736 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-748426 chown docker:docker /home/docker/.ssh/authorized_keys]
I0127 11:57:50.825983 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:57:50.843719 1039736 machine.go:93] provisionDockerMachine start ...
I0127 11:57:50.843812 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:50.860742 1039736 main.go:141] libmachine: Using SSH client type: native
I0127 11:57:50.861143 1039736 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x4132a0] 0x415ae0 <nil> [] 0s} 127.0.0.1 33762 <nil> <nil>}
I0127 11:57:50.861151 1039736 main.go:141] libmachine: About to run SSH command:
hostname
I0127 11:57:50.986749 1039736 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-748426
I0127 11:57:50.986766 1039736 ubuntu.go:169] provisioning hostname "scheduled-stop-748426"
I0127 11:57:50.986842 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:51.008361 1039736 main.go:141] libmachine: Using SSH client type: native
I0127 11:57:51.008626 1039736 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x4132a0] 0x415ae0 <nil> [] 0s} 127.0.0.1 33762 <nil> <nil>}
I0127 11:57:51.008636 1039736 main.go:141] libmachine: About to run SSH command:
sudo hostname scheduled-stop-748426 && echo "scheduled-stop-748426" | sudo tee /etc/hostname
I0127 11:57:51.145599 1039736 main.go:141] libmachine: SSH cmd err, output: <nil>: scheduled-stop-748426
I0127 11:57:51.145683 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:51.164596 1039736 main.go:141] libmachine: Using SSH client type: native
I0127 11:57:51.164834 1039736 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x4132a0] 0x415ae0 <nil> [] 0s} 127.0.0.1 33762 <nil> <nil>}
I0127 11:57:51.164851 1039736 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sscheduled-stop-748426' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-748426/g' /etc/hosts;
else
echo '127.0.1.1 scheduled-stop-748426' | sudo tee -a /etc/hosts;
fi
fi
I0127 11:57:51.289004 1039736 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0127 11:57:51.289022 1039736 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20318-888339/.minikube CaCertPath:/home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20318-888339/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20318-888339/.minikube}
I0127 11:57:51.289067 1039736 ubuntu.go:177] setting up certificates
I0127 11:57:51.289075 1039736 provision.go:84] configureAuth start
I0127 11:57:51.289134 1039736 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-748426
I0127 11:57:51.306076 1039736 provision.go:143] copyHostCerts
I0127 11:57:51.306133 1039736 exec_runner.go:144] found /home/jenkins/minikube-integration/20318-888339/.minikube/ca.pem, removing ...
I0127 11:57:51.306140 1039736 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20318-888339/.minikube/ca.pem
I0127 11:57:51.306220 1039736 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20318-888339/.minikube/ca.pem (1082 bytes)
I0127 11:57:51.306337 1039736 exec_runner.go:144] found /home/jenkins/minikube-integration/20318-888339/.minikube/cert.pem, removing ...
I0127 11:57:51.306341 1039736 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20318-888339/.minikube/cert.pem
I0127 11:57:51.306370 1039736 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20318-888339/.minikube/cert.pem (1123 bytes)
I0127 11:57:51.306433 1039736 exec_runner.go:144] found /home/jenkins/minikube-integration/20318-888339/.minikube/key.pem, removing ...
I0127 11:57:51.306436 1039736 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20318-888339/.minikube/key.pem
I0127 11:57:51.306458 1039736 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20318-888339/.minikube/key.pem (1675 bytes)
I0127 11:57:51.306501 1039736 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20318-888339/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca-key.pem org=jenkins.scheduled-stop-748426 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-748426]
I0127 11:57:51.538554 1039736 provision.go:177] copyRemoteCerts
I0127 11:57:51.538607 1039736 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0127 11:57:51.538648 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:51.555438 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
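Editor's note: the sshutil.go line above carries everything needed to reach the node: user docker on 127.0.0.1:33762 (the forwarded 22/tcp port from the docker inspect output earlier) with the generated id_rsa key. A sketch of opening that session with golang.org/x/crypto/ssh follows; it is illustrative only, not libmachine's actual client, and the key path and port are specific to this run.

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Key path, user, and port as printed by sshutil.go above; specific to this run.
	key, err := os.ReadFile("/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa")
	if err != nil {
		panic(err)
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		panic(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for a localhost-forwarded test container
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:33762", cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	sess, err := client.NewSession()
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.CombinedOutput("hostname")
	fmt.Printf("remote hostname: %s (err=%v)\n", out, err)
}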
I0127 11:57:51.646064 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I0127 11:57:51.672038 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0127 11:57:51.696333 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0127 11:57:51.720358 1039736 provision.go:87] duration metric: took 431.270556ms to configureAuth
I0127 11:57:51.720375 1039736 ubuntu.go:193] setting minikube options for container-runtime
I0127 11:57:51.720562 1039736 config.go:182] Loaded profile config "scheduled-stop-748426": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.32.1
I0127 11:57:51.720568 1039736 machine.go:96] duration metric: took 876.839617ms to provisionDockerMachine
I0127 11:57:51.720573 1039736 client.go:171] duration metric: took 7.799866955s to LocalClient.Create
I0127 11:57:51.720585 1039736 start.go:167] duration metric: took 7.799921436s to libmachine.API.Create "scheduled-stop-748426"
I0127 11:57:51.720592 1039736 start.go:293] postStartSetup for "scheduled-stop-748426" (driver="docker")
I0127 11:57:51.720600 1039736 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0127 11:57:51.720647 1039736 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0127 11:57:51.720683 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:51.737148 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:57:51.826025 1039736 ssh_runner.go:195] Run: cat /etc/os-release
I0127 11:57:51.828869 1039736 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0127 11:57:51.828900 1039736 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0127 11:57:51.828909 1039736 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0127 11:57:51.828915 1039736 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0127 11:57:51.828925 1039736 filesync.go:126] Scanning /home/jenkins/minikube-integration/20318-888339/.minikube/addons for local assets ...
I0127 11:57:51.828984 1039736 filesync.go:126] Scanning /home/jenkins/minikube-integration/20318-888339/.minikube/files for local assets ...
I0127 11:57:51.829093 1039736 filesync.go:149] local asset: /home/jenkins/minikube-integration/20318-888339/.minikube/files/etc/ssl/certs/8937152.pem -> 8937152.pem in /etc/ssl/certs
I0127 11:57:51.829197 1039736 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0127 11:57:51.837739 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/files/etc/ssl/certs/8937152.pem --> /etc/ssl/certs/8937152.pem (1708 bytes)
I0127 11:57:51.862031 1039736 start.go:296] duration metric: took 141.42475ms for postStartSetup
I0127 11:57:51.862401 1039736 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-748426
I0127 11:57:51.879043 1039736 profile.go:143] Saving config to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/config.json ...
I0127 11:57:51.879322 1039736 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0127 11:57:51.879363 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:51.895400 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:57:51.981563 1039736 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0127 11:57:51.985665 1039736 start.go:128] duration metric: took 8.068369084s to createHost
I0127 11:57:51.985680 1039736 start.go:83] releasing machines lock for "scheduled-stop-748426", held for 8.068487064s
I0127 11:57:51.985746 1039736 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-748426
I0127 11:57:52.003269 1039736 ssh_runner.go:195] Run: cat /version.json
I0127 11:57:52.003321 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:52.003579 1039736 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0127 11:57:52.003647 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:57:52.034805 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:57:52.041125 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:57:52.120555 1039736 ssh_runner.go:195] Run: systemctl --version
I0127 11:57:52.249742 1039736 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0127 11:57:52.253950 1039736 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0127 11:57:52.278869 1039736 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0127 11:57:52.278938 1039736 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0127 11:57:52.309082 1039736 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0127 11:57:52.309097 1039736 start.go:495] detecting cgroup driver to use...
I0127 11:57:52.309128 1039736 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0127 11:57:52.309179 1039736 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0127 11:57:52.321894 1039736 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0127 11:57:52.333629 1039736 docker.go:217] disabling cri-docker service (if available) ...
I0127 11:57:52.333685 1039736 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0127 11:57:52.348093 1039736 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0127 11:57:52.363817 1039736 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0127 11:57:52.455143 1039736 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0127 11:57:52.557989 1039736 docker.go:233] disabling docker service ...
I0127 11:57:52.558046 1039736 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0127 11:57:52.579455 1039736 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0127 11:57:52.591139 1039736 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0127 11:57:52.674404 1039736 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0127 11:57:52.766168 1039736 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0127 11:57:52.777539 1039736 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0127 11:57:52.794222 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0127 11:57:52.803979 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0127 11:57:52.814099 1039736 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0127 11:57:52.814161 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0127 11:57:52.823667 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0127 11:57:52.833080 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0127 11:57:52.842587 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0127 11:57:52.851948 1039736 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0127 11:57:52.860845 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0127 11:57:52.870713 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0127 11:57:52.879838 1039736 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0127 11:57:52.889189 1039736 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0127 11:57:52.897638 1039736 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0127 11:57:52.905776 1039736 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0127 11:57:52.997475 1039736 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0127 11:57:53.132368 1039736 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0127 11:57:53.132430 1039736 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0127 11:57:53.136130 1039736 start.go:563] Will wait 60s for crictl version
I0127 11:57:53.136183 1039736 ssh_runner.go:195] Run: which crictl
I0127 11:57:53.139400 1039736 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0127 11:57:53.174276 1039736 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.24
RuntimeApiVersion: v1
I0127 11:57:53.174338 1039736 ssh_runner.go:195] Run: containerd --version
I0127 11:57:53.198966 1039736 ssh_runner.go:195] Run: containerd --version
I0127 11:57:53.229578 1039736 out.go:177] * Preparing Kubernetes v1.32.1 on containerd 1.7.24 ...
I0127 11:57:53.232260 1039736 cli_runner.go:164] Run: docker network inspect scheduled-stop-748426 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0127 11:57:53.248172 1039736 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I0127 11:57:53.251873 1039736 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0127 11:57:53.262095 1039736 kubeadm.go:883] updating cluster {Name:scheduled-stop-748426 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:scheduled-stop-748426 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0127 11:57:53.262210 1039736 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime containerd
I0127 11:57:53.262268 1039736 ssh_runner.go:195] Run: sudo crictl images --output json
I0127 11:57:53.296767 1039736 containerd.go:627] all images are preloaded for containerd runtime.
I0127 11:57:53.296779 1039736 containerd.go:534] Images already preloaded, skipping extraction
I0127 11:57:53.296840 1039736 ssh_runner.go:195] Run: sudo crictl images --output json
I0127 11:57:53.330834 1039736 containerd.go:627] all images are preloaded for containerd runtime.
I0127 11:57:53.330847 1039736 cache_images.go:84] Images are preloaded, skipping loading
I0127 11:57:53.330854 1039736 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.32.1 containerd true true} ...
I0127 11:57:53.330942 1039736 kubeadm.go:946] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-748426 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.32.1 ClusterName:scheduled-stop-748426 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0127 11:57:53.331006 1039736 ssh_runner.go:195] Run: sudo crictl info
I0127 11:57:53.372629 1039736 cni.go:84] Creating CNI manager for ""
I0127 11:57:53.372640 1039736 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0127 11:57:53.372653 1039736 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0127 11:57:53.372674 1039736 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.32.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-748426 NodeName:scheduled-stop-748426 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0127 11:57:53.372790 1039736 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "scheduled-stop-748426"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
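The block above is the kubeadm, kubelet and kube-proxy configuration that minikube renders from its defaults before writing it to /var/tmp/minikube/kubeadm.yaml.new. If a run needed different component flags, a hypothetical way to override individual values at start time (rather than editing the generated YAML) is minikube's --extra-config flag; the values shown here are only illustrative:
  minikube start -p scheduled-stop-748426 --driver=docker --container-runtime=containerd \
    --extra-config=controller-manager.leader-elect=true
The component prefix (apiserver, controller-manager, scheduler, kubelet, etcd) selects which section of the generated config the key is merged into.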
I0127 11:57:53.372858 1039736 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.1
I0127 11:57:53.381795 1039736 binaries.go:44] Found k8s binaries, skipping transfer
I0127 11:57:53.381856 1039736 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0127 11:57:53.390675 1039736 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (325 bytes)
I0127 11:57:53.408421 1039736 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0127 11:57:53.426395 1039736 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2311 bytes)
I0127 11:57:53.444172 1039736 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0127 11:57:53.447705 1039736 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0127 11:57:53.458211 1039736 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0127 11:57:53.536768 1039736 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0127 11:57:53.552419 1039736 certs.go:68] Setting up /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426 for IP: 192.168.76.2
I0127 11:57:53.552430 1039736 certs.go:194] generating shared ca certs ...
I0127 11:57:53.552444 1039736 certs.go:226] acquiring lock for ca certs: {Name:mke15f79704ae0e83f911aa0e3f9c4b862da9341 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:53.552572 1039736 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20318-888339/.minikube/ca.key
I0127 11:57:53.552612 1039736 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20318-888339/.minikube/proxy-client-ca.key
I0127 11:57:53.552617 1039736 certs.go:256] generating profile certs ...
I0127 11:57:53.552669 1039736 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.key
I0127 11:57:53.552686 1039736 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.crt with IP's: []
I0127 11:57:54.256278 1039736 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.crt ...
I0127 11:57:54.256293 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.crt: {Name:mk00473d4059a4eaf568e46c3f14ed7b30ed3260 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:54.256475 1039736 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.key ...
I0127 11:57:54.256482 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/client.key: {Name:mk2ff30c241cd8c08021624122d9a51abf04667c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:54.256562 1039736 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key.61a3669b
I0127 11:57:54.256575 1039736 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt.61a3669b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0127 11:57:54.699851 1039736 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt.61a3669b ...
I0127 11:57:54.699867 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt.61a3669b: {Name:mkea04e822e41f1cc4061560bee5c60f81a75c13 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:54.700059 1039736 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key.61a3669b ...
I0127 11:57:54.700067 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key.61a3669b: {Name:mkd87eaec3e4d85033834570fb31d33123fc7c49 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:54.700186 1039736 certs.go:381] copying /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt.61a3669b -> /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt
I0127 11:57:54.700264 1039736 certs.go:385] copying /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key.61a3669b -> /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key
I0127 11:57:54.700322 1039736 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.key
I0127 11:57:54.700335 1039736 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.crt with IP's: []
I0127 11:57:55.805953 1039736 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.crt ...
I0127 11:57:55.805971 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.crt: {Name:mk9c74d7f0e1ab1fd77138e75cbacd1d9b873b9b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:55.806168 1039736 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.key ...
I0127 11:57:55.806175 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.key: {Name:mk080b29214fa746515e3b62380eec3a05495edf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:57:55.806380 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/893715.pem (1338 bytes)
W0127 11:57:55.806417 1039736 certs.go:480] ignoring /home/jenkins/minikube-integration/20318-888339/.minikube/certs/893715_empty.pem, impossibly tiny 0 bytes
I0127 11:57:55.806425 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca-key.pem (1679 bytes)
I0127 11:57:55.806448 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/ca.pem (1082 bytes)
I0127 11:57:55.806470 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/cert.pem (1123 bytes)
I0127 11:57:55.806491 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/certs/key.pem (1675 bytes)
I0127 11:57:55.806530 1039736 certs.go:484] found cert: /home/jenkins/minikube-integration/20318-888339/.minikube/files/etc/ssl/certs/8937152.pem (1708 bytes)
I0127 11:57:55.807149 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0127 11:57:55.834707 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0127 11:57:55.859387 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0127 11:57:55.884106 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0127 11:57:55.907962 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I0127 11:57:55.932389 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0127 11:57:55.957103 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0127 11:57:55.981805 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/profiles/scheduled-stop-748426/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0127 11:57:56.007524 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/certs/893715.pem --> /usr/share/ca-certificates/893715.pem (1338 bytes)
I0127 11:57:56.034843 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/files/etc/ssl/certs/8937152.pem --> /usr/share/ca-certificates/8937152.pem (1708 bytes)
I0127 11:57:56.060264 1039736 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20318-888339/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0127 11:57:56.085175 1039736 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0127 11:57:56.103726 1039736 ssh_runner.go:195] Run: openssl version
I0127 11:57:56.109402 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/893715.pem && ln -fs /usr/share/ca-certificates/893715.pem /etc/ssl/certs/893715.pem"
I0127 11:57:56.119068 1039736 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/893715.pem
I0127 11:57:56.122759 1039736 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jan 27 11:31 /usr/share/ca-certificates/893715.pem
I0127 11:57:56.122816 1039736 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/893715.pem
I0127 11:57:56.129750 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/893715.pem /etc/ssl/certs/51391683.0"
I0127 11:57:56.139005 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/8937152.pem && ln -fs /usr/share/ca-certificates/8937152.pem /etc/ssl/certs/8937152.pem"
I0127 11:57:56.148221 1039736 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/8937152.pem
I0127 11:57:56.151586 1039736 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jan 27 11:31 /usr/share/ca-certificates/8937152.pem
I0127 11:57:56.151649 1039736 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/8937152.pem
I0127 11:57:56.158493 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/8937152.pem /etc/ssl/certs/3ec20f2e.0"
I0127 11:57:56.167885 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0127 11:57:56.177159 1039736 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0127 11:57:56.180774 1039736 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jan 27 11:23 /usr/share/ca-certificates/minikubeCA.pem
I0127 11:57:56.180829 1039736 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0127 11:57:56.187838 1039736 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
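The hash-named links created above follow OpenSSL's c_rehash convention: each certificate under /etc/ssl/certs gets a symlink named after its subject hash plus a ".0" suffix, which is how OpenSSL locates trusted CAs. A sketch of the same step done by hand, using the values already shown in the log lines above:
  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0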
I0127 11:57:56.197541 1039736 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0127 11:57:56.200750 1039736 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0127 11:57:56.200793 1039736 kubeadm.go:392] StartCluster: {Name:scheduled-stop-748426 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:2048 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:scheduled-stop-748426 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0127 11:57:56.200862 1039736 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0127 11:57:56.200924 1039736 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0127 11:57:56.237132 1039736 cri.go:89] found id: ""
I0127 11:57:56.237195 1039736 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0127 11:57:56.246075 1039736 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0127 11:57:56.254898 1039736 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0127 11:57:56.254954 1039736 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0127 11:57:56.263472 1039736 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0127 11:57:56.263482 1039736 kubeadm.go:157] found existing configuration files:
I0127 11:57:56.263538 1039736 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0127 11:57:56.272255 1039736 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0127 11:57:56.272314 1039736 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0127 11:57:56.280660 1039736 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0127 11:57:56.289195 1039736 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0127 11:57:56.289251 1039736 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0127 11:57:56.297800 1039736 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0127 11:57:56.306585 1039736 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0127 11:57:56.306647 1039736 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0127 11:57:56.314750 1039736 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0127 11:57:56.323313 1039736 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0127 11:57:56.323369 1039736 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0127 11:57:56.331779 1039736 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.32.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0127 11:57:56.392369 1039736 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0127 11:57:56.392612 1039736 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1075-aws\n", err: exit status 1
I0127 11:57:56.467370 1039736 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0127 11:58:11.297284 1039736 kubeadm.go:310] [init] Using Kubernetes version: v1.32.1
I0127 11:58:11.297338 1039736 kubeadm.go:310] [preflight] Running pre-flight checks
I0127 11:58:11.297432 1039736 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0127 11:58:11.297487 1039736 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1075-aws
I0127 11:58:11.297521 1039736 kubeadm.go:310] OS: Linux
I0127 11:58:11.297566 1039736 kubeadm.go:310] CGROUPS_CPU: enabled
I0127 11:58:11.297614 1039736 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0127 11:58:11.297660 1039736 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0127 11:58:11.297708 1039736 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0127 11:58:11.297755 1039736 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0127 11:58:11.297803 1039736 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0127 11:58:11.297848 1039736 kubeadm.go:310] CGROUPS_PIDS: enabled
I0127 11:58:11.297895 1039736 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0127 11:58:11.297941 1039736 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0127 11:58:11.298013 1039736 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0127 11:58:11.298107 1039736 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0127 11:58:11.298196 1039736 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0127 11:58:11.298258 1039736 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0127 11:58:11.301091 1039736 out.go:235] - Generating certificates and keys ...
I0127 11:58:11.301183 1039736 kubeadm.go:310] [certs] Using existing ca certificate authority
I0127 11:58:11.301244 1039736 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0127 11:58:11.301310 1039736 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0127 11:58:11.301366 1039736 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0127 11:58:11.301425 1039736 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0127 11:58:11.301475 1039736 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0127 11:58:11.301528 1039736 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0127 11:58:11.301651 1039736 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-748426] and IPs [192.168.76.2 127.0.0.1 ::1]
I0127 11:58:11.301703 1039736 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0127 11:58:11.301823 1039736 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-748426] and IPs [192.168.76.2 127.0.0.1 ::1]
I0127 11:58:11.301887 1039736 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0127 11:58:11.301950 1039736 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0127 11:58:11.301993 1039736 kubeadm.go:310] [certs] Generating "sa" key and public key
I0127 11:58:11.302047 1039736 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0127 11:58:11.302097 1039736 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0127 11:58:11.302152 1039736 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0127 11:58:11.302204 1039736 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0127 11:58:11.302266 1039736 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0127 11:58:11.302319 1039736 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0127 11:58:11.302400 1039736 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0127 11:58:11.302465 1039736 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0127 11:58:11.305152 1039736 out.go:235] - Booting up control plane ...
I0127 11:58:11.305263 1039736 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0127 11:58:11.305352 1039736 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0127 11:58:11.305422 1039736 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0127 11:58:11.305523 1039736 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0127 11:58:11.305606 1039736 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0127 11:58:11.305644 1039736 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0127 11:58:11.305795 1039736 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0127 11:58:11.305912 1039736 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0127 11:58:11.305986 1039736 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.000815141s
I0127 11:58:11.306058 1039736 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0127 11:58:11.306125 1039736 kubeadm.go:310] [api-check] The API server is healthy after 6.001376326s
I0127 11:58:11.306254 1039736 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0127 11:58:11.306386 1039736 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0127 11:58:11.306445 1039736 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0127 11:58:11.306637 1039736 kubeadm.go:310] [mark-control-plane] Marking the node scheduled-stop-748426 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0127 11:58:11.306693 1039736 kubeadm.go:310] [bootstrap-token] Using token: jegsuq.qjjdrkcunejmj7k5
I0127 11:58:11.309387 1039736 out.go:235] - Configuring RBAC rules ...
I0127 11:58:11.309508 1039736 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0127 11:58:11.309641 1039736 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0127 11:58:11.309798 1039736 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0127 11:58:11.309952 1039736 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0127 11:58:11.310100 1039736 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0127 11:58:11.310193 1039736 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0127 11:58:11.310314 1039736 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0127 11:58:11.310376 1039736 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0127 11:58:11.310434 1039736 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0127 11:58:11.310438 1039736 kubeadm.go:310]
I0127 11:58:11.310504 1039736 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0127 11:58:11.310507 1039736 kubeadm.go:310]
I0127 11:58:11.310582 1039736 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0127 11:58:11.310585 1039736 kubeadm.go:310]
I0127 11:58:11.310613 1039736 kubeadm.go:310] mkdir -p $HOME/.kube
I0127 11:58:11.310670 1039736 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0127 11:58:11.310719 1039736 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0127 11:58:11.310722 1039736 kubeadm.go:310]
I0127 11:58:11.310774 1039736 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0127 11:58:11.310781 1039736 kubeadm.go:310]
I0127 11:58:11.310827 1039736 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0127 11:58:11.310830 1039736 kubeadm.go:310]
I0127 11:58:11.310881 1039736 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0127 11:58:11.310954 1039736 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0127 11:58:11.311021 1039736 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0127 11:58:11.311024 1039736 kubeadm.go:310]
I0127 11:58:11.311107 1039736 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0127 11:58:11.311182 1039736 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0127 11:58:11.311186 1039736 kubeadm.go:310]
I0127 11:58:11.311271 1039736 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token jegsuq.qjjdrkcunejmj7k5 \
I0127 11:58:11.311372 1039736 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:ab77ca629af18522722058cf6e6b9d1dd63a614828aa0be2683e90565b703f3c \
I0127 11:58:11.311393 1039736 kubeadm.go:310] --control-plane
I0127 11:58:11.311397 1039736 kubeadm.go:310]
I0127 11:58:11.311480 1039736 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0127 11:58:11.311483 1039736 kubeadm.go:310]
I0127 11:58:11.311564 1039736 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token jegsuq.qjjdrkcunejmj7k5 \
I0127 11:58:11.311678 1039736 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:ab77ca629af18522722058cf6e6b9d1dd63a614828aa0be2683e90565b703f3c
I0127 11:58:11.311686 1039736 cni.go:84] Creating CNI manager for ""
I0127 11:58:11.311692 1039736 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0127 11:58:11.314500 1039736 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0127 11:58:11.317107 1039736 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0127 11:58:11.321118 1039736 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.32.1/kubectl ...
I0127 11:58:11.321129 1039736 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0127 11:58:11.340712 1039736 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
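The manifest applied here is the kindnet CNI that minikube recommends for the docker driver with the containerd runtime (see the cni.go lines above). A hypothetical spot-check that the CNI workload came up, assuming the upstream manifest's kube-system/kindnet DaemonSet naming:
  sudo /var/lib/minikube/binaries/v1.32.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig get daemonset kindnet -n kube-system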
I0127 11:58:11.622312 1039736 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0127 11:58:11.622427 1039736 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0127 11:58:11.622493 1039736 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.32.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-748426 minikube.k8s.io/updated_at=2025_01_27T11_58_11_0700 minikube.k8s.io/version=v1.35.0 minikube.k8s.io/commit=21d19df81a8d69cdaec1a8f1932c09dc00369650 minikube.k8s.io/name=scheduled-stop-748426 minikube.k8s.io/primary=true
I0127 11:58:11.793507 1039736 ops.go:34] apiserver oom_adj: -16
I0127 11:58:11.793525 1039736 kubeadm.go:1113] duration metric: took 171.147367ms to wait for elevateKubeSystemPrivileges
I0127 11:58:11.793536 1039736 kubeadm.go:394] duration metric: took 15.592749035s to StartCluster
I0127 11:58:11.793551 1039736 settings.go:142] acquiring lock: {Name:mk8e4620a376eeb900823ad35149c0dd6d301c83 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:58:11.793612 1039736 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20318-888339/kubeconfig
I0127 11:58:11.794281 1039736 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-888339/kubeconfig: {Name:mk75ddd380b783b9f157e482ffdcc29dbd635876 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 11:58:11.794468 1039736 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0127 11:58:11.794573 1039736 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0127 11:58:11.794852 1039736 config.go:182] Loaded profile config "scheduled-stop-748426": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.32.1
I0127 11:58:11.794887 1039736 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0127 11:58:11.794947 1039736 addons.go:69] Setting storage-provisioner=true in profile "scheduled-stop-748426"
I0127 11:58:11.794962 1039736 addons.go:238] Setting addon storage-provisioner=true in "scheduled-stop-748426"
I0127 11:58:11.794984 1039736 host.go:66] Checking if "scheduled-stop-748426" exists ...
I0127 11:58:11.795460 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:58:11.795714 1039736 addons.go:69] Setting default-storageclass=true in profile "scheduled-stop-748426"
I0127 11:58:11.795729 1039736 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-748426"
I0127 11:58:11.796008 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:58:11.799515 1039736 out.go:177] * Verifying Kubernetes components...
I0127 11:58:11.806739 1039736 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0127 11:58:11.842570 1039736 addons.go:238] Setting addon default-storageclass=true in "scheduled-stop-748426"
I0127 11:58:11.842599 1039736 host.go:66] Checking if "scheduled-stop-748426" exists ...
I0127 11:58:11.843009 1039736 cli_runner.go:164] Run: docker container inspect scheduled-stop-748426 --format={{.State.Status}}
I0127 11:58:11.851421 1039736 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0127 11:58:11.854131 1039736 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0127 11:58:11.854141 1039736 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0127 11:58:11.854206 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:58:11.891896 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:58:11.892262 1039736 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0127 11:58:11.892269 1039736 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0127 11:58:11.892325 1039736 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-748426
I0127 11:58:11.923185 1039736 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33762 SSHKeyPath:/home/jenkins/minikube-integration/20318-888339/.minikube/machines/scheduled-stop-748426/id_rsa Username:docker}
I0127 11:58:12.065929 1039736 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0127 11:58:12.106662 1039736 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0127 11:58:12.106974 1039736 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.32.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0127 11:58:12.145000 1039736 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.32.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0127 11:58:12.499769 1039736 start.go:971] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I0127 11:58:12.501380 1039736 api_server.go:52] waiting for apiserver process to appear ...
I0127 11:58:12.501433 1039736 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0127 11:58:12.702443 1039736 api_server.go:72] duration metric: took 907.949247ms to wait for apiserver process to appear ...
I0127 11:58:12.702454 1039736 api_server.go:88] waiting for apiserver healthz status ...
I0127 11:58:12.702471 1039736 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I0127 11:58:12.705254 1039736 out.go:177] * Enabled addons: default-storageclass, storage-provisioner
I0127 11:58:12.707989 1039736 addons.go:514] duration metric: took 913.083247ms for enable addons: enabled=[default-storageclass storage-provisioner]
I0127 11:58:12.712760 1039736 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I0127 11:58:12.713826 1039736 api_server.go:141] control plane version: v1.32.1
I0127 11:58:12.713840 1039736 api_server.go:131] duration metric: took 11.380954ms to wait for apiserver health ...
I0127 11:58:12.713847 1039736 system_pods.go:43] waiting for kube-system pods to appear ...
I0127 11:58:12.720010 1039736 system_pods.go:59] 5 kube-system pods found
I0127 11:58:12.720030 1039736 system_pods.go:61] "etcd-scheduled-stop-748426" [6086d396-6517-4117-a4c4-d40c8e9cc36a] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0127 11:58:12.720039 1039736 system_pods.go:61] "kube-apiserver-scheduled-stop-748426" [c865c1e1-0032-477f-9637-85739079d6f3] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0127 11:58:12.720046 1039736 system_pods.go:61] "kube-controller-manager-scheduled-stop-748426" [181345e9-32cc-478d-b1e1-c00e42c5b842] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0127 11:58:12.720053 1039736 system_pods.go:61] "kube-scheduler-scheduled-stop-748426" [d60f2708-d555-4717-98ab-8502c843a7fb] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0127 11:58:12.720058 1039736 system_pods.go:61] "storage-provisioner" [af66dd39-9188-415b-9a71-7dac74b7cc9d] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0127 11:58:12.720063 1039736 system_pods.go:74] duration metric: took 6.211212ms to wait for pod list to return data ...
I0127 11:58:12.720073 1039736 kubeadm.go:582] duration metric: took 925.586474ms to wait for: map[apiserver:true system_pods:true]
I0127 11:58:12.720086 1039736 node_conditions.go:102] verifying NodePressure condition ...
I0127 11:58:12.723293 1039736 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0127 11:58:12.723311 1039736 node_conditions.go:123] node cpu capacity is 2
I0127 11:58:12.723320 1039736 node_conditions.go:105] duration metric: took 3.230518ms to run NodePressure ...
I0127 11:58:12.723330 1039736 start.go:241] waiting for startup goroutines ...
I0127 11:58:13.013202 1039736 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-748426" context rescaled to 1 replicas
I0127 11:58:13.013235 1039736 start.go:246] waiting for cluster config update ...
I0127 11:58:13.013246 1039736 start.go:255] writing updated cluster config ...
I0127 11:58:13.013631 1039736 ssh_runner.go:195] Run: rm -f paused
I0127 11:58:13.072980 1039736 start.go:600] kubectl: 1.32.1, cluster: 1.32.1 (minor skew: 0)
I0127 11:58:13.076172 1039736 out.go:177] * Done! kubectl is now configured to use "scheduled-stop-748426" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
a97b452b5e772 7fc9d4aa817aa 10 seconds ago Running etcd 0 1091e5ddc1a38 etcd-scheduled-stop-748426
16510b6a7ac00 265c2dedf28ab 10 seconds ago Running kube-apiserver 0 dafabe52ca89a kube-apiserver-scheduled-stop-748426
727a0847f087b ddb38cac617cb 10 seconds ago Running kube-scheduler 0 bdb352f4eef6d kube-scheduler-scheduled-stop-748426
537f10d28b292 2933761aa7ada 10 seconds ago Running kube-controller-manager 0 02ec88019edd4 kube-controller-manager-scheduled-stop-748426
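This listing is the CRI view from inside the node; a hypothetical way to reproduce it by hand is to run crictl over minikube ssh:
  minikube -p scheduled-stop-748426 ssh -- sudo crictl ps -a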
==> containerd <==
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.190182980Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.225252435Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.225324466Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.225337774Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.225445391Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.261289089Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-controller-manager-scheduled-stop-748426,Uid:caa5ac4f85b15eaa3c74fad992f53917,Namespace:kube-system,Attempt:0,} returns sandbox id \"02ec88019edd46e5bdc80fdaaf98da7c791fc6349738f6571f3fb57afff2cbf9\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.265004848Z" level=info msg="CreateContainer within sandbox \"02ec88019edd46e5bdc80fdaaf98da7c791fc6349738f6571f3fb57afff2cbf9\" for container &ContainerMetadata{Name:kube-controller-manager,Attempt:0,}"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.293320826Z" level=info msg="CreateContainer within sandbox \"02ec88019edd46e5bdc80fdaaf98da7c791fc6349738f6571f3fb57afff2cbf9\" for &ContainerMetadata{Name:kube-controller-manager,Attempt:0,} returns container id \"537f10d28b292f4a6cfe37966bd1c067c3a75dc8c8809342cbfffc2f98f92dda\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.294206074Z" level=info msg="StartContainer for \"537f10d28b292f4a6cfe37966bd1c067c3a75dc8c8809342cbfffc2f98f92dda\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.298296159Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-scheduler-scheduled-stop-748426,Uid:afa3e820e8b9e990cade89a49848c8b3,Namespace:kube-system,Attempt:0,} returns sandbox id \"bdb352f4eef6d3dce1efb0fee4298060a96b38738500aa3622e60326a6baf7b3\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.301000986Z" level=info msg="CreateContainer within sandbox \"bdb352f4eef6d3dce1efb0fee4298060a96b38738500aa3622e60326a6baf7b3\" for container &ContainerMetadata{Name:kube-scheduler,Attempt:0,}"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.308123243Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:kube-apiserver-scheduled-stop-748426,Uid:d0e1b7884e19e5db4cf9e2ac9cb71dc6,Namespace:kube-system,Attempt:0,} returns sandbox id \"dafabe52ca89a92f769ffb759f6116076cf99ac7fb4ce4499aca33b4de449332\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.313269609Z" level=info msg="CreateContainer within sandbox \"dafabe52ca89a92f769ffb759f6116076cf99ac7fb4ce4499aca33b4de449332\" for container &ContainerMetadata{Name:kube-apiserver,Attempt:0,}"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.325625573Z" level=info msg="CreateContainer within sandbox \"bdb352f4eef6d3dce1efb0fee4298060a96b38738500aa3622e60326a6baf7b3\" for &ContainerMetadata{Name:kube-scheduler,Attempt:0,} returns container id \"727a0847f087bcdbf434e1c7383741df9ddec84b07c5db8e59c53a92c20309ab\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.326284300Z" level=info msg="StartContainer for \"727a0847f087bcdbf434e1c7383741df9ddec84b07c5db8e59c53a92c20309ab\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.350733424Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:etcd-scheduled-stop-748426,Uid:0fca17c6874e88feec826e2e1a723fe3,Namespace:kube-system,Attempt:0,} returns sandbox id \"1091e5ddc1a38cc7e318ff82f92cfc15a52345a82f102639ae65f6515f6b3e2c\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.354675065Z" level=info msg="CreateContainer within sandbox \"dafabe52ca89a92f769ffb759f6116076cf99ac7fb4ce4499aca33b4de449332\" for &ContainerMetadata{Name:kube-apiserver,Attempt:0,} returns container id \"16510b6a7ac00f01ced6eed0dd6556db6e2eaa8f4a56311a43950b7aa4a3c0ab\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.355087823Z" level=info msg="StartContainer for \"16510b6a7ac00f01ced6eed0dd6556db6e2eaa8f4a56311a43950b7aa4a3c0ab\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.356970392Z" level=info msg="CreateContainer within sandbox \"1091e5ddc1a38cc7e318ff82f92cfc15a52345a82f102639ae65f6515f6b3e2c\" for container &ContainerMetadata{Name:etcd,Attempt:0,}"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.390971365Z" level=info msg="CreateContainer within sandbox \"1091e5ddc1a38cc7e318ff82f92cfc15a52345a82f102639ae65f6515f6b3e2c\" for &ContainerMetadata{Name:etcd,Attempt:0,} returns container id \"a97b452b5e772196332a5154a75e1664b70a4eab6fca5fcc659aa060e1f02e83\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.391458148Z" level=info msg="StartContainer for \"a97b452b5e772196332a5154a75e1664b70a4eab6fca5fcc659aa060e1f02e83\""
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.443585168Z" level=info msg="StartContainer for \"537f10d28b292f4a6cfe37966bd1c067c3a75dc8c8809342cbfffc2f98f92dda\" returns successfully"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.474556907Z" level=info msg="StartContainer for \"727a0847f087bcdbf434e1c7383741df9ddec84b07c5db8e59c53a92c20309ab\" returns successfully"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.537863432Z" level=info msg="StartContainer for \"16510b6a7ac00f01ced6eed0dd6556db6e2eaa8f4a56311a43950b7aa4a3c0ab\" returns successfully"
Jan 27 11:58:04 scheduled-stop-748426 containerd[832]: time="2025-01-27T11:58:04.643130380Z" level=info msg="StartContainer for \"a97b452b5e772196332a5154a75e1664b70a4eab6fca5fcc659aa060e1f02e83\" returns successfully"
==> describe nodes <==
Name: scheduled-stop-748426
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=scheduled-stop-748426
kubernetes.io/os=linux
minikube.k8s.io/commit=21d19df81a8d69cdaec1a8f1932c09dc00369650
minikube.k8s.io/name=scheduled-stop-748426
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_01_27T11_58_11_0700
minikube.k8s.io/version=v1.35.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 27 Jan 2025 11:58:08 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: scheduled-stop-748426
AcquireTime: <unset>
RenewTime: Mon, 27 Jan 2025 11:58:10 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 27 Jan 2025 11:58:08 +0000 Mon, 27 Jan 2025 11:58:05 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 27 Jan 2025 11:58:08 +0000 Mon, 27 Jan 2025 11:58:05 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 27 Jan 2025 11:58:08 +0000 Mon, 27 Jan 2025 11:58:05 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 27 Jan 2025 11:58:08 +0000 Mon, 27 Jan 2025 11:58:08 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: scheduled-stop-748426
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
System Info:
Machine ID: 85987e9b0c0e490193771bc39c9df73a
System UUID: 8704061c-637c-4267-8f48-5186d048eba7
Boot ID: 9a2b5a8b-82ce-43cf-92bd-6297263d30a0
Kernel Version: 5.15.0-1075-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.7.24
Kubelet Version: v1.32.1
Kube-Proxy Version: v1.32.1
Non-terminated Pods: (5 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system etcd-scheduled-stop-748426 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 4s
kube-system kube-apiserver-scheduled-stop-748426 250m (12%) 0 (0%) 0 (0%) 0 (0%) 5s
kube-system kube-controller-manager-scheduled-stop-748426 200m (10%) 0 (0%) 0 (0%) 0 (0%) 4s
kube-system kube-scheduler-scheduled-stop-748426 100m (5%) 0 (0%) 0 (0%) 0 (0%) 4s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                650m (32%)  0 (0%)
  memory             100Mi (1%)  0 (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
  hugepages-32Mi     0 (0%)      0 (0%)
  hugepages-64Ki     0 (0%)      0 (0%)
Events:
  Type     Reason                   Age  From             Message
  ----     ------                   ---- ----             -------
  Normal   Starting                 4s   kubelet          Starting kubelet.
  Warning  CgroupV1                 4s   kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
  Normal   NodeAllocatableEnforced  4s   kubelet          Updated Node Allocatable limit across pods
  Normal   NodeHasSufficientMemory  4s   kubelet          Node scheduled-stop-748426 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    4s   kubelet          Node scheduled-stop-748426 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     4s   kubelet          Node scheduled-stop-748426 status is now: NodeHasSufficientPID
  Normal   RegisteredNode           0s   node-controller  Node scheduled-stop-748426 event: Registered Node scheduled-stop-748426 in Controller
==> dmesg <==
==> etcd [a97b452b5e772196332a5154a75e1664b70a4eab6fca5fcc659aa060e1f02e83] <==
{"level":"info","ts":"2025-01-27T11:58:04.761320Z","caller":"embed/etcd.go:729","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-01-27T11:58:04.762050Z","caller":"embed/etcd.go:280","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-01-27T11:58:04.762207Z","caller":"embed/etcd.go:871","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-01-27T11:58:04.762694Z","caller":"embed/etcd.go:600","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-01-27T11:58:04.763106Z","caller":"embed/etcd.go:572","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-01-27T11:58:04.921068Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-01-27T11:58:04.921291Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-01-27T11:58:04.921412Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-01-27T11:58:04.921530Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-01-27T11:58:04.921655Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-01-27T11:58:04.921745Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-01-27T11:58:04.921837Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-01-27T11:58:04.925209Z","caller":"etcdserver/server.go:2140","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:scheduled-stop-748426 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-01-27T11:58:04.925541Z","caller":"etcdserver/server.go:2651","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:58:04.925758Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-01-27T11:58:04.925967Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-01-27T11:58:04.926060Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-01-27T11:58:04.926168Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-01-27T11:58:04.926957Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-01-27T11:58:04.927831Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-01-27T11:58:04.937122Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:58:04.937445Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:58:04.937611Z","caller":"etcdserver/server.go:2675","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:58:04.939347Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-01-27T11:58:04.949770Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
==> kernel <==
11:58:15 up 4:40, 0 users, load average: 1.68, 1.85, 2.33
Linux scheduled-stop-748426 5.15.0-1075-aws #82~20.04.1-Ubuntu SMP Thu Dec 19 05:23:06 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [16510b6a7ac00f01ced6eed0dd6556db6e2eaa8f4a56311a43950b7aa4a3c0ab] <==
I0127 11:58:08.237378 1 cache.go:39] Caches are synced for autoregister controller
I0127 11:58:08.259120 1 shared_informer.go:320] Caches are synced for node_authorizer
I0127 11:58:08.262438 1 apf_controller.go:382] Running API Priority and Fairness config worker
I0127 11:58:08.262467 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I0127 11:58:08.262680 1 shared_informer.go:320] Caches are synced for configmaps
I0127 11:58:08.263111 1 cache.go:39] Caches are synced for LocalAvailability controller
I0127 11:58:08.281088 1 shared_informer.go:320] Caches are synced for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I0127 11:58:08.281277 1 policy_source.go:240] refreshing policies
E0127 11:58:08.311524 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
E0127 11:58:08.321181 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I0127 11:58:08.352658 1 controller.go:615] quota admission added evaluator for: namespaces
I0127 11:58:08.521437 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I0127 11:58:08.936129 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0127 11:58:08.943587 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0127 11:58:08.943609 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0127 11:58:09.597325 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0127 11:58:09.644166 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0127 11:58:09.778161 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0127 11:58:09.785417 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I0127 11:58:09.786635 1 controller.go:615] quota admission added evaluator for: endpoints
I0127 11:58:09.791696 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0127 11:58:10.266697 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0127 11:58:10.707690 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0127 11:58:10.720434 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0127 11:58:10.731869 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
==> kube-controller-manager [537f10d28b292f4a6cfe37966bd1c067c3a75dc8c8809342cbfffc2f98f92dda] <==
I0127 11:58:14.826509 1 shared_informer.go:320] Caches are synced for resource quota
I0127 11:58:14.846378 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status
I0127 11:58:14.857326 1 shared_informer.go:320] Caches are synced for node
I0127 11:58:14.857443 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0127 11:58:14.857495 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0127 11:58:14.857501 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator
I0127 11:58:14.857507 1 shared_informer.go:320] Caches are synced for cidrallocator
I0127 11:58:14.860493 1 shared_informer.go:320] Caches are synced for HPA
I0127 11:58:14.862716 1 shared_informer.go:320] Caches are synced for daemon sets
I0127 11:58:14.862755 1 shared_informer.go:320] Caches are synced for ReplicationController
I0127 11:58:14.862779 1 shared_informer.go:320] Caches are synced for ephemeral
I0127 11:58:14.862796 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0127 11:58:14.863380 1 shared_informer.go:320] Caches are synced for crt configmap
I0127 11:58:14.863572 1 shared_informer.go:320] Caches are synced for service account
I0127 11:58:14.863767 1 shared_informer.go:320] Caches are synced for attach detach
I0127 11:58:14.863945 1 shared_informer.go:320] Caches are synced for PVC protection
I0127 11:58:14.866173 1 shared_informer.go:320] Caches are synced for deployment
I0127 11:58:14.871861 1 shared_informer.go:320] Caches are synced for job
I0127 11:58:14.873128 1 shared_informer.go:320] Caches are synced for persistent volume
I0127 11:58:14.880180 1 shared_informer.go:320] Caches are synced for endpoint_slice
I0127 11:58:14.888365 1 shared_informer.go:320] Caches are synced for resource quota
I0127 11:58:14.894435 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="scheduled-stop-748426" podCIDRs=["10.244.0.0/24"]
I0127 11:58:14.894672 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="scheduled-stop-748426"
I0127 11:58:14.894777 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="scheduled-stop-748426"
I0127 11:58:14.897181 1 shared_informer.go:320] Caches are synced for garbage collector
==> kube-scheduler [727a0847f087bcdbf434e1c7383741df9ddec84b07c5db8e59c53a92c20309ab] <==
W0127 11:58:08.715709 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0127 11:58:08.715735 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.715824 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0127 11:58:08.715847 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716030 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0127 11:58:08.716054 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716106 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "volumeattachments" in API group "storage.k8s.io" at the cluster scope
E0127 11:58:08.716126 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.VolumeAttachment: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716193 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0127 11:58:08.716214 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716272 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0127 11:58:08.716291 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716352 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0127 11:58:08.716371 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716451 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0127 11:58:08.716471 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.716525 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0127 11:58:08.716542 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.711756 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0127 11:58:08.716589 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.717138 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0127 11:58:08.717163 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:58:08.717609 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0127 11:58:08.717635 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I0127 11:58:10.317368 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059525 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/afa3e820e8b9e990cade89a49848c8b3-kubeconfig\") pod \"kube-scheduler-scheduled-stop-748426\" (UID: \"afa3e820e8b9e990cade89a49848c8b3\") " pod="kube-system/kube-scheduler-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059545 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d0e1b7884e19e5db4cf9e2ac9cb71dc6-etc-ca-certificates\") pod \"kube-apiserver-scheduled-stop-748426\" (UID: \"d0e1b7884e19e5db4cf9e2ac9cb71dc6\") " pod="kube-system/kube-apiserver-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059567 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d0e1b7884e19e5db4cf9e2ac9cb71dc6-usr-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-748426\" (UID: \"d0e1b7884e19e5db4cf9e2ac9cb71dc6\") " pod="kube-system/kube-apiserver-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059586 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/caa5ac4f85b15eaa3c74fad992f53917-ca-certs\") pod \"kube-controller-manager-scheduled-stop-748426\" (UID: \"caa5ac4f85b15eaa3c74fad992f53917\") " pod="kube-system/kube-controller-manager-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059611 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/caa5ac4f85b15eaa3c74fad992f53917-usr-local-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-748426\" (UID: \"caa5ac4f85b15eaa3c74fad992f53917\") " pod="kube-system/kube-controller-manager-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059633 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/caa5ac4f85b15eaa3c74fad992f53917-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-748426\" (UID: \"caa5ac4f85b15eaa3c74fad992f53917\") " pod="kube-system/kube-controller-manager-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059653 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/0fca17c6874e88feec826e2e1a723fe3-etcd-certs\") pod \"etcd-scheduled-stop-748426\" (UID: \"0fca17c6874e88feec826e2e1a723fe3\") " pod="kube-system/etcd-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059670 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d0e1b7884e19e5db4cf9e2ac9cb71dc6-ca-certs\") pod \"kube-apiserver-scheduled-stop-748426\" (UID: \"d0e1b7884e19e5db4cf9e2ac9cb71dc6\") " pod="kube-system/kube-apiserver-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059694 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d0e1b7884e19e5db4cf9e2ac9cb71dc6-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-748426\" (UID: \"d0e1b7884e19e5db4cf9e2ac9cb71dc6\") " pod="kube-system/kube-apiserver-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059714 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/0fca17c6874e88feec826e2e1a723fe3-etcd-data\") pod \"etcd-scheduled-stop-748426\" (UID: \"0fca17c6874e88feec826e2e1a723fe3\") " pod="kube-system/etcd-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059733 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d0e1b7884e19e5db4cf9e2ac9cb71dc6-k8s-certs\") pod \"kube-apiserver-scheduled-stop-748426\" (UID: \"d0e1b7884e19e5db4cf9e2ac9cb71dc6\") " pod="kube-system/kube-apiserver-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.059752 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/caa5ac4f85b15eaa3c74fad992f53917-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-748426\" (UID: \"caa5ac4f85b15eaa3c74fad992f53917\") " pod="kube-system/kube-controller-manager-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.622525 1535 apiserver.go:52] "Watching apiserver"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.631733 1535 desired_state_of_world_populator.go:157] "Finished populating initial desired state of world"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.742448 1535 kubelet.go:3200] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: E0127 11:58:11.753196 1535 kubelet.go:3202] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-748426\" already exists" pod="kube-system/etcd-scheduled-stop-748426"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.776305 1535 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-748426" podStartSLOduration=1.7762733609999999 podStartE2EDuration="1.776273361s" podCreationTimestamp="2025-01-27 11:58:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:58:11.764027015 +0000 UTC m=+1.229960485" watchObservedRunningTime="2025-01-27 11:58:11.776273361 +0000 UTC m=+1.242206815"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.791513 1535 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-748426" podStartSLOduration=1.791491597 podStartE2EDuration="1.791491597s" podCreationTimestamp="2025-01-27 11:58:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:58:11.77651498 +0000 UTC m=+1.242448434" watchObservedRunningTime="2025-01-27 11:58:11.791491597 +0000 UTC m=+1.257425050"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.817596 1535 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-748426" podStartSLOduration=1.817579839 podStartE2EDuration="1.817579839s" podCreationTimestamp="2025-01-27 11:58:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:58:11.817377702 +0000 UTC m=+1.283311189" watchObservedRunningTime="2025-01-27 11:58:11.817579839 +0000 UTC m=+1.283513293"
Jan 27 11:58:11 scheduled-stop-748426 kubelet[1535]: I0127 11:58:11.817784 1535 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-748426" podStartSLOduration=2.81777803 podStartE2EDuration="2.81777803s" podCreationTimestamp="2025-01-27 11:58:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:58:11.791793736 +0000 UTC m=+1.257727288" watchObservedRunningTime="2025-01-27 11:58:11.81777803 +0000 UTC m=+1.283711492"
Jan 27 11:58:14 scheduled-stop-748426 kubelet[1535]: I0127 11:58:14.901909 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dms8k\" (UniqueName: \"kubernetes.io/projected/af66dd39-9188-415b-9a71-7dac74b7cc9d-kube-api-access-dms8k\") pod \"storage-provisioner\" (UID: \"af66dd39-9188-415b-9a71-7dac74b7cc9d\") " pod="kube-system/storage-provisioner"
Jan 27 11:58:14 scheduled-stop-748426 kubelet[1535]: I0127 11:58:14.901975 1535 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/af66dd39-9188-415b-9a71-7dac74b7cc9d-tmp\") pod \"storage-provisioner\" (UID: \"af66dd39-9188-415b-9a71-7dac74b7cc9d\") " pod="kube-system/storage-provisioner"
Jan 27 11:58:15 scheduled-stop-748426 kubelet[1535]: E0127 11:58:15.020549 1535 projected.go:288] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Jan 27 11:58:15 scheduled-stop-748426 kubelet[1535]: E0127 11:58:15.020614 1535 projected.go:194] Error preparing data for projected volume kube-api-access-dms8k for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Jan 27 11:58:15 scheduled-stop-748426 kubelet[1535]: E0127 11:58:15.020718 1535 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/af66dd39-9188-415b-9a71-7dac74b7cc9d-kube-api-access-dms8k podName:af66dd39-9188-415b-9a71-7dac74b7cc9d nodeName:}" failed. No retries permitted until 2025-01-27 11:58:15.520688775 +0000 UTC m=+4.986622237 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-dms8k" (UniqueName: "kubernetes.io/projected/af66dd39-9188-415b-9a71-7dac74b7cc9d-kube-api-access-dms8k") pod "storage-provisioner" (UID: "af66dd39-9188-415b-9a71-7dac74b7cc9d") : configmap "kube-root-ca.crt" not found
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p scheduled-stop-748426 -n scheduled-stop-748426
helpers_test.go:261: (dbg) Run: kubectl --context scheduled-stop-748426 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: kindnet-dtmxl kube-proxy-w8b5l storage-provisioner
helpers_test.go:274: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context scheduled-stop-748426 describe pod kindnet-dtmxl kube-proxy-w8b5l storage-provisioner
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context scheduled-stop-748426 describe pod kindnet-dtmxl kube-proxy-w8b5l storage-provisioner: exit status 1 (145.183246ms)
** stderr **
Error from server (NotFound): pods "kindnet-dtmxl" not found
Error from server (NotFound): pods "kube-proxy-w8b5l" not found
Error from server (NotFound): pods "storage-provisioner" not found
** /stderr **
helpers_test.go:279: kubectl --context scheduled-stop-748426 describe pod kindnet-dtmxl kube-proxy-w8b5l storage-provisioner: exit status 1
helpers_test.go:175: Cleaning up "scheduled-stop-748426" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-linux-arm64 delete -p scheduled-stop-748426
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-748426: (2.117264971s)
--- FAIL: TestScheduledStopUnix (34.46s)