=== RUN TestSkaffold
skaffold_test.go:59: (dbg) Run: /var/folders/vq/yhv778t970xgml0dzm5fdwlr0000gp/T/skaffold.exe1780526238 version
skaffold_test.go:59: (dbg) Done: /var/folders/vq/yhv778t970xgml0dzm5fdwlr0000gp/T/skaffold.exe1780526238 version: (1.606259144s)
skaffold_test.go:63: skaffold version: v2.10.0
skaffold_test.go:66: (dbg) Run: out/minikube-darwin-amd64 start -p skaffold-475000 --memory=2600 --driver=docker
skaffold_test.go:66: (dbg) Done: out/minikube-darwin-amd64 start -p skaffold-475000 --memory=2600 --driver=docker : (21.726484306s)
skaffold_test.go:86: copying out/minikube-darwin-amd64 to /Users/jenkins/workspace/out/minikube
skaffold_test.go:105: (dbg) Run: /var/folders/vq/yhv778t970xgml0dzm5fdwlr0000gp/T/skaffold.exe1780526238 run --minikube-profile skaffold-475000 --kube-context skaffold-475000 --status-check=true --port-forward=false --interactive=false
E0401 04:47:59.598569 12907 cert_rotation.go:168] key failed with : open /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/addons-357000/client.crt: no such file or directory
E0401 04:48:09.373815 12907 cert_rotation.go:168] key failed with : open /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/functional-683000/client.crt: no such file or directory
skaffold_test.go:105: (dbg) Done: /var/folders/vq/yhv778t970xgml0dzm5fdwlr0000gp/T/skaffold.exe1780526238 run --minikube-profile skaffold-475000 --kube-context skaffold-475000 --status-check=true --port-forward=false --interactive=false: (4m34.144091644s)
skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ...
helpers_test.go:344: "leeroy-app-8477656689-hcjh4" [cc247ea0-483c-4f2d-aa88-0eff133725c7] Running
skaffold_test.go:111: ***** TestSkaffold: pod "app=leeroy-app" failed to start within 1m0s: context deadline exceeded ****
skaffold_test.go:111: (dbg) Run: out/minikube-darwin-amd64 status --format={{.APIServer}} -p skaffold-475000 -n skaffold-475000
skaffold_test.go:111: TestSkaffold: showing logs for failed pods as of 2024-04-01 04:49:32.046475 -0700 PDT m=+2400.564506825
skaffold_test.go:111: (dbg) Run: kubectl --context skaffold-475000 describe po leeroy-app-8477656689-hcjh4 -n default
skaffold_test.go:111: (dbg) Non-zero exit: kubectl --context skaffold-475000 describe po leeroy-app-8477656689-hcjh4 -n default: context deadline exceeded (2.045µs)
skaffold_test.go:111: kubectl --context skaffold-475000 describe po leeroy-app-8477656689-hcjh4 -n default: context deadline exceeded
skaffold_test.go:111: (dbg) Run: kubectl --context skaffold-475000 logs leeroy-app-8477656689-hcjh4 -n default
skaffold_test.go:111: (dbg) Non-zero exit: kubectl --context skaffold-475000 logs leeroy-app-8477656689-hcjh4 -n default: context deadline exceeded (265ns)
skaffold_test.go:111: kubectl --context skaffold-475000 logs leeroy-app-8477656689-hcjh4 -n default: context deadline exceeded
skaffold_test.go:112: failed waiting for pod leeroy-app: app=leeroy-app within 1m0s: context deadline exceeded
panic.go:626: *** TestSkaffold FAILED at 2024-04-01 04:49:32.050026 -0700 PDT m=+2400.568057748
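[Note: the describe/logs attempts above fail after roughly 2µs because the post-mortem reuses the pod wait's already-expired 1m0s context; exec.Cmd.Start returns ctx.Err() for a done context before kubectl is even spawned. A minimal Go sketch of that failure mode, with illustrative names rather than minikube's actual helpers:

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Shortened stand-in for the 1m0s pod-wait budget being fully consumed.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	<-ctx.Done()

	// Reusing the expired ctx: Run fails in microseconds with
	// "context deadline exceeded", before kubectl ever starts.
	err := exec.CommandContext(ctx, "kubectl", "describe", "po", "leeroy-app-8477656689-hcjh4").Run()
	fmt.Println(err) // context deadline exceeded

	// A fresh context would let the post-mortem kubectl calls actually run.
	fresh, stop := context.WithTimeout(context.Background(), 30*time.Second)
	defer stop()
	_ = exec.CommandContext(fresh, "kubectl", "logs", "leeroy-app-8477656689-hcjh4").Run()
}
]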
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestSkaffold]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect skaffold-475000
helpers_test.go:235: (dbg) docker inspect skaffold-475000:
-- stdout --
[
{
"Id": "f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38",
"Created": "2024-04-01T11:44:38.708881454Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 228356,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-04-01T11:44:38.915596035Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:96c05e40227e38fe4ccae4f0aabc836c21ba547af60e2e13f00894d532e626b8",
"ResolvConfPath": "/var/lib/docker/containers/f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38/hostname",
"HostsPath": "/var/lib/docker/containers/f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38/hosts",
"LogPath": "/var/lib/docker/containers/f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38/f244cd68a41264cba560f9c7196edfb9378e9dfb947507a912a93d89d8b91a38-json.log",
"Name": "/skaffold-475000",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"skaffold-475000:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "skaffold-475000",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "0"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "0"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "0"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "0"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "0"
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2726297600,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 2726297600,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/da7f2e574a23bc71f2260cbef037cd95c39cb4bbf5cf726eaf13ef42bff7bedc-init/diff:/var/lib/docker/overlay2/090c1715cac6dc2b0e713fba81e04c66863cde229a8b7b642aa924ee5eecad26/diff",
"MergedDir": "/var/lib/docker/overlay2/da7f2e574a23bc71f2260cbef037cd95c39cb4bbf5cf726eaf13ef42bff7bedc/merged",
"UpperDir": "/var/lib/docker/overlay2/da7f2e574a23bc71f2260cbef037cd95c39cb4bbf5cf726eaf13ef42bff7bedc/diff",
"WorkDir": "/var/lib/docker/overlay2/da7f2e574a23bc71f2260cbef037cd95c39cb4bbf5cf726eaf13ef42bff7bedc/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "skaffold-475000",
"Source": "/var/lib/docker/volumes/skaffold-475000/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "skaffold-475000",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "skaffold-475000",
"name.minikube.sigs.k8s.io": "skaffold-475000",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "8dcf3902ad9c2bc9f85860ecb1215c825762187dba2d709a5c99afa475b78310",
"SandboxKey": "/var/run/docker/netns/8dcf3902ad9c",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "60988"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "60989"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "60990"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "60991"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "60992"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"skaffold-475000": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": [
"f244cd68a412",
"skaffold-475000"
],
"MacAddress": "02:42:c0:a8:4c:02",
"NetworkID": "e1ababfeb45e662c40eaafec26ac7e1b08484b7b4558eaab9c5df72cff0d254e",
"EndpointID": "a62b9bdc27fca3f2b0b6c6f82fa57568cf4e56d13471af66269d85a0aa38f8cb",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DriverOpts": null,
"DNSNames": [
"skaffold-475000",
"f244cd68a412"
]
}
}
}
}
]
-- /stdout --
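[Note: the inspect dump above binds each exposed container port to an ephemeral 127.0.0.1 host port (e.g. 8443/tcp, the apiserver, maps to 60992). A small Go sketch, assuming only the JSON shape shown above, for pulling such a binding out of `docker inspect` output; the struct mirrors just the fields used here:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// inspectEntry models only the NetworkSettings.Ports slice of the dump.
type inspectEntry struct {
	NetworkSettings struct {
		Ports map[string][]struct {
			HostIp   string
			HostPort string
		}
	}
}

func main() {
	out, err := exec.Command("docker", "inspect", "skaffold-475000").Output()
	if err != nil {
		panic(err)
	}
	var entries []inspectEntry // docker inspect always returns a JSON array
	if err := json.Unmarshal(out, &entries); err != nil {
		panic(err)
	}
	// For the dump above this prints 127.0.0.1:60992 (panics if 8443/tcp is absent).
	b := entries[0].NetworkSettings.Ports["8443/tcp"][0]
	fmt.Printf("apiserver reachable at %s:%s\n", b.HostIp, b.HostPort)
}
]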
helpers_test.go:239: (dbg) Run: out/minikube-darwin-amd64 status --format={{.Host}} -p skaffold-475000 -n skaffold-475000
helpers_test.go:244: <<< TestSkaffold FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestSkaffold]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-darwin-amd64 -p skaffold-475000 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-darwin-amd64 -p skaffold-475000 logs -n 25: (2.316471719s)
helpers_test.go:252: TestSkaffold logs:
-- stdout --
==> Audit <==
|------------|--------------------------------|-----------------------|----------|----------------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|------------|--------------------------------|-----------------------|----------|----------------|---------------------|---------------------|
| start | -p multinode-699000-m02 | multinode-699000-m02 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:39 PDT | |
| | --driver=docker | | | | | |
| start | -p multinode-699000-m03 | multinode-699000-m03 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:39 PDT | 01 Apr 24 04:40 PDT |
| | --driver=docker | | | | | |
| node | add -p multinode-699000 | multinode-699000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:40 PDT | |
| delete | -p multinode-699000-m03 | multinode-699000-m03 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:40 PDT | 01 Apr 24 04:40 PDT |
| delete | -p multinode-699000 | multinode-699000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:40 PDT | 01 Apr 24 04:40 PDT |
| start | -p test-preload-603000 | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:40 PDT | 01 Apr 24 04:42 PDT |
| | --memory=2200 | | | | | |
| | --alsologtostderr | | | | | |
| | --wait=true --preload=false | | | | | |
| | --driver=docker | | | | | |
| | --kubernetes-version=v1.24.4 | | | | | |
| image | test-preload-603000 image pull | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:42 PDT |
| | gcr.io/k8s-minikube/busybox | | | | | |
| stop | -p test-preload-603000 | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:42 PDT |
| start | -p test-preload-603000 | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:42 PDT |
| | --memory=2200 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| | --wait=true --driver=docker | | | | | |
| image | test-preload-603000 image list | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:42 PDT |
| delete | -p test-preload-603000 | test-preload-603000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:42 PDT |
| start | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:42 PDT | 01 Apr 24 04:43 PDT |
| | --memory=2048 --driver=docker | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 5m | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | 01 Apr 24 04:43 PDT |
| | --cancel-scheduled | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | |
| | --schedule 15s | | | | | |
| stop | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:43 PDT | 01 Apr 24 04:44 PDT |
| | --schedule 15s | | | | | |
| delete | -p scheduled-stop-073000 | scheduled-stop-073000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:44 PDT | 01 Apr 24 04:44 PDT |
| start | -p skaffold-475000 | skaffold-475000 | jenkins | v1.33.0-beta.0 | 01 Apr 24 04:44 PDT | 01 Apr 24 04:44 PDT |
| | --memory=2600 --driver=docker | | | | | |
| docker-env | --shell none -p | skaffold-475000 | skaffold | v1.33.0-beta.0 | 01 Apr 24 04:44 PDT | 01 Apr 24 04:44 PDT |
| | skaffold-475000 | | | | | |
| | --user=skaffold | | | | | |
|------------|--------------------------------|-----------------------|----------|----------------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/04/01 04:44:34
Running on machine: MacOS-Agent-2
Binary: Built with gc go1.22.1 for darwin/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0401 04:44:34.799597 23509 out.go:291] Setting OutFile to fd 1 ...
I0401 04:44:34.799753 23509 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0401 04:44:34.799756 23509 out.go:304] Setting ErrFile to fd 2...
I0401 04:44:34.799759 23509 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0401 04:44:34.799936 23509 root.go:338] Updating PATH: /Users/jenkins/minikube-integration/18551-12424/.minikube/bin
I0401 04:44:34.801330 23509 out.go:298] Setting JSON to false
I0401 04:44:34.824543 23509 start.go:129] hostinfo: {"hostname":"MacOS-Agent-2.local","uptime":9847,"bootTime":1711962027,"procs":436,"os":"darwin","platform":"darwin","platformFamily":"Standalone Workstation","platformVersion":"14.3.1","kernelVersion":"23.3.0","kernelArch":"x86_64","virtualizationSystem":"","virtualizationRole":"","hostId":"2965c349-98a5-5970-aaa9-9eedd3ae5959"}
W0401 04:44:34.824626 23509 start.go:137] gopshost.Virtualization returned error: not implemented yet
I0401 04:44:34.847388 23509 out.go:177] * [skaffold-475000] minikube v1.33.0-beta.0 on Darwin 14.3.1
I0401 04:44:34.888430 23509 out.go:177] - MINIKUBE_LOCATION=18551
I0401 04:44:34.888446 23509 notify.go:220] Checking for updates...
I0401 04:44:34.931423 23509 out.go:177] - KUBECONFIG=/Users/jenkins/minikube-integration/18551-12424/kubeconfig
I0401 04:44:34.973109 23509 out.go:177] - MINIKUBE_BIN=out/minikube-darwin-amd64
I0401 04:44:34.994386 23509 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0401 04:44:35.015299 23509 out.go:177] - MINIKUBE_HOME=/Users/jenkins/minikube-integration/18551-12424/.minikube
I0401 04:44:35.036187 23509 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0401 04:44:35.057879 23509 driver.go:392] Setting default libvirt URI to qemu:///system
I0401 04:44:35.114319 23509 docker.go:122] docker version: linux-25.0.3:Docker Desktop 4.27.2 (137060)
I0401 04:44:35.114486 23509 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0401 04:44:35.213655 23509 info.go:266] docker info: {ID:9dd12a49-41d2-44e8-aa64-4ab7fa99394e Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:67 OomKillDisable:false NGoroutines:110 SystemTime:2024-04-01 11:44:35.20388402 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:26 KernelVersion:6.6.12-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:6213300224 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:25.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=unconfined name=cgroupns] ProductLicense: Warnings:[WARNING: daemon is not using the default seccomp profile] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/Users/jenkins/.docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-buildx] ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.12.1-desktop.4] map[Name:compose Path:/Users/jenkins/.docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-compose] ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.5-desktop.1] map[Name:debug Path:/Users/jenkins/.docker/cli-plugins/docker-debug SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-debug] ShortDescription:Get a shell into any image or container. Vendor:Docker Inc. Version:0.0.24] map[Name:dev Path:/Users/jenkins/.docker/cli-plugins/docker-dev SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-dev] ShortDescription:Docker Dev Environments Vendor:Docker Inc. Version:v0.1.0] map[Name:extension Path:/Users/jenkins/.docker/cli-plugins/docker-extension SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-extension] ShortDescription:Manages Docker extensions Vendor:Docker Inc. Version:v0.2.21] map[Name:feedback Path:/Users/jenkins/.docker/cli-plugins/docker-feedback SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-feedback] ShortDescription:Provide feedback, right in your terminal! Vendor:Docker Inc. Version:v1.0.4] map[Name:init Path:/Users/jenkins/.docker/cli-plugins/docker-init SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-init] ShortDescription:Creates Docker-related starter files for your project Vendor:Docker Inc. Version:v1.0.0] map[Name:sbom Path:/Users/jenkins/.docker/cli-plugins/docker-sbom SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-sbom] ShortDescription:View the packaged-based Software Bill Of Materials (SBOM) for an image URL:https://github.com/docker/sbom-cli-plugin Vendor:Anchore Inc. Version:0.6.0] map[Err:failed to fetch metadata: fork/exec /Users/jenkins/.docker/cli-plugins/docker-scan: no such file or directory Name:scan Path:/Users/jenkins/.docker/cli-plugins/docker-scan] map[Name:scout Path:/Users/jenkins/.docker/cli-plugins/docker-scout SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scout] ShortDescription:Docker Scout Vendor:Docker Inc. Version:v1.4.1]] Warnings:<nil>}}
I0401 04:44:35.256233 23509 out.go:177] * Using the docker driver based on user configuration
I0401 04:44:35.277360 23509 start.go:297] selected driver: docker
I0401 04:44:35.277377 23509 start.go:901] validating driver "docker" against <nil>
I0401 04:44:35.277390 23509 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0401 04:44:35.281790 23509 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0401 04:44:35.381195 23509 info.go:266] docker info: {ID:9dd12a49-41d2-44e8-aa64-4ab7fa99394e Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:67 OomKillDisable:false NGoroutines:110 SystemTime:2024-04-01 11:44:35.371606177 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:26 KernelVersion:6.6.12-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:6213300224 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:25.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=unconfined name=cgroupns] ProductLicense: Warnings:[WARNING: daemon is not using the default seccomp profile] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/Users/jenkins/.docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-buildx] ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.12.1-desktop.4] map[Name:compose Path:/Users/jenkins/.docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-compose] ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.5-desktop.1] map[Name:debug Path:/Users/jenkins/.docker/cli-plugins/docker-debug SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-debug] ShortDescription:Get a shell into any image or container. Vendor:Docker Inc. Version:0.0.24] map[Name:dev Path:/Users/jenkins/.docker/cli-plugins/docker-dev SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-dev] ShortDescription:Docker Dev Environments Vendor:Docker Inc. Version:v0.1.0] map[Name:extension Path:/Users/jenkins/.docker/cli-plugins/docker-extension SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-extension] ShortDescription:Manages Docker extensions Vendor:Docker Inc. Version:v0.2.21] map[Name:feedback Path:/Users/jenkins/.docker/cli-plugins/docker-feedback SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-feedback] ShortDescription:Provide feedback, right in your terminal! Vendor:Docker Inc. Version:v1.0.4] map[Name:init Path:/Users/jenkins/.docker/cli-plugins/docker-init SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-init] ShortDescription:Creates Docker-related starter files for your project Vendor:Docker Inc. Version:v1.0.0] map[Name:sbom Path:/Users/jenkins/.docker/cli-plugins/docker-sbom SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-sbom] ShortDescription:View the packaged-based Software Bill Of Materials (SBOM) for an image URL:https://github.com/docker/sbom-cli-plugin Vendor:Anchore Inc. Version:0.6.0] map[Err:failed to fetch metadata: fork/exec /Users/jenkins/.docker/cli-plugins/docker-scan: no such file or directory Name:scan Path:/Users/jenkins/.docker/cli-plugins/docker-scan] map[Name:scout Path:/Users/jenkins/.docker/cli-plugins/docker-scout SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scout] ShortDescription:Docker Scout Vendor:Docker Inc. Version:v1.4.1]] Warnings:<nil>}}
I0401 04:44:35.381409 23509 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0401 04:44:35.381580 23509 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
I0401 04:44:35.403045 23509 out.go:177] * Using Docker Desktop driver with root privileges
I0401 04:44:35.423873 23509 cni.go:84] Creating CNI manager for ""
I0401 04:44:35.423895 23509 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0401 04:44:35.423904 23509 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0401 04:44:35.423974 23509 start.go:340] cluster config:
{Name:skaffold-475000 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 Memory:2600 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.29.3 ClusterName:skaffold-475000 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0401 04:44:35.445057 23509 out.go:177] * Starting "skaffold-475000" primary control-plane node in "skaffold-475000" cluster
I0401 04:44:35.467069 23509 cache.go:121] Beginning downloading kic base image for docker with docker
I0401 04:44:35.487838 23509 out.go:177] * Pulling base image v0.0.43-1711559786-18485 ...
I0401 04:44:35.529900 23509 preload.go:132] Checking if preload exists for k8s version v1.29.3 and runtime docker
I0401 04:44:35.529992 23509 preload.go:147] Found local preload: /Users/jenkins/minikube-integration/18551-12424/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.3-docker-overlay2-amd64.tar.lz4
I0401 04:44:35.530012 23509 cache.go:56] Caching tarball of preloaded images
I0401 04:44:35.530002 23509 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 in local docker daemon
I0401 04:44:35.530230 23509 preload.go:173] Found /Users/jenkins/minikube-integration/18551-12424/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0401 04:44:35.530248 23509 cache.go:59] Finished verifying existence of preloaded tar for v1.29.3 on docker
I0401 04:44:35.531935 23509 profile.go:143] Saving config to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/config.json ...
I0401 04:44:35.532033 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/config.json: {Name:mkf71cf20eb3be3d503299766d2f6c1ad46e110d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:35.581113 23509 image.go:83] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 in local docker daemon, skipping pull
I0401 04:44:35.581122 23509 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 exists in daemon, skipping load
I0401 04:44:35.581140 23509 cache.go:194] Successfully downloaded all kic artifacts
I0401 04:44:35.581172 23509 start.go:360] acquireMachinesLock for skaffold-475000: {Name:mkde7497f0eaf25a5a0ceb9c9a9d115713711efd Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0401 04:44:35.581315 23509 start.go:364] duration metric: took 132.929µs to acquireMachinesLock for "skaffold-475000"
I0401 04:44:35.581342 23509 start.go:93] Provisioning new machine with config: &{Name:skaffold-475000 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 Memory:2600 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.29.3 ClusterName:skaffold-475000 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I0401 04:44:35.581416 23509 start.go:125] createHost starting for "" (driver="docker")
I0401 04:44:35.623723 23509 out.go:204] * Creating docker container (CPUs=2, Memory=2600MB) ...
I0401 04:44:35.624122 23509 start.go:159] libmachine.API.Create for "skaffold-475000" (driver="docker")
I0401 04:44:35.624155 23509 client.go:168] LocalClient.Create starting
I0401 04:44:35.624301 23509 main.go:141] libmachine: Reading certificate data from /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem
I0401 04:44:35.624363 23509 main.go:141] libmachine: Decoding PEM data...
I0401 04:44:35.624384 23509 main.go:141] libmachine: Parsing certificate...
I0401 04:44:35.624448 23509 main.go:141] libmachine: Reading certificate data from /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/cert.pem
I0401 04:44:35.624492 23509 main.go:141] libmachine: Decoding PEM data...
I0401 04:44:35.624500 23509 main.go:141] libmachine: Parsing certificate...
I0401 04:44:35.625088 23509 cli_runner.go:164] Run: docker network inspect skaffold-475000 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0401 04:44:35.675083 23509 cli_runner.go:211] docker network inspect skaffold-475000 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0401 04:44:35.675184 23509 network_create.go:281] running [docker network inspect skaffold-475000] to gather additional debugging logs...
I0401 04:44:35.675201 23509 cli_runner.go:164] Run: docker network inspect skaffold-475000
W0401 04:44:35.724470 23509 cli_runner.go:211] docker network inspect skaffold-475000 returned with exit code 1
I0401 04:44:35.724514 23509 network_create.go:284] error running [docker network inspect skaffold-475000]: docker network inspect skaffold-475000: exit status 1
stdout:
[]
stderr:
Error response from daemon: network skaffold-475000 not found
I0401 04:44:35.724526 23509 network_create.go:286] output of [docker network inspect skaffold-475000]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network skaffold-475000 not found
** /stderr **
I0401 04:44:35.724646 23509 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0401 04:44:35.775764 23509 network.go:209] skipping subnet 192.168.49.0/24 that is reserved: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:<nil>}
I0401 04:44:35.777388 23509 network.go:209] skipping subnet 192.168.58.0/24 that is reserved: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:<nil>}
I0401 04:44:35.777747 23509 network.go:206] using free private subnet 192.168.67.0/24: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc002348bb0}
I0401 04:44:35.777764 23509 network_create.go:124] attempt to create docker network skaffold-475000 192.168.67.0/24 with gateway 192.168.67.1 and MTU of 65535 ...
I0401 04:44:35.777840 23509 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.67.0/24 --gateway=192.168.67.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=65535 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=skaffold-475000 skaffold-475000
W0401 04:44:35.827235 23509 cli_runner.go:211] docker network create --driver=bridge --subnet=192.168.67.0/24 --gateway=192.168.67.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=65535 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=skaffold-475000 skaffold-475000 returned with exit code 1
W0401 04:44:35.827270 23509 network_create.go:149] failed to create docker network skaffold-475000 192.168.67.0/24 with gateway 192.168.67.1 and mtu of 65535: docker network create --driver=bridge --subnet=192.168.67.0/24 --gateway=192.168.67.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=65535 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=skaffold-475000 skaffold-475000: exit status 1
stdout:
stderr:
Error response from daemon: Pool overlaps with other one on this address space
W0401 04:44:35.827291 23509 network_create.go:116] failed to create docker network skaffold-475000 192.168.67.0/24, will retry: subnet is taken
I0401 04:44:35.828901 23509 network.go:209] skipping subnet 192.168.67.0/24 that is reserved: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:<nil>}
I0401 04:44:35.829259 23509 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc002349ae0}
I0401 04:44:35.829268 23509 network_create.go:124] attempt to create docker network skaffold-475000 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 65535 ...
I0401 04:44:35.829330 23509 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=65535 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=skaffold-475000 skaffold-475000
I0401 04:44:35.914943 23509 network_create.go:108] docker network skaffold-475000 192.168.76.0/24 created
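[Note: the two attempts above show the subnet picker at work: 192.168.67.0/24 collides ("Pool overlaps with other one on this address space"), is marked reserved, and the next /24 candidate succeeds. A rough Go sketch of that retry loop under those assumptions, not minikube's actual network_create.go:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Candidate private /24s, stepping through the third octet like the log above.
	for _, subnet := range []string{"192.168.49.0/24", "192.168.58.0/24", "192.168.67.0/24", "192.168.76.0/24"} {
		gw := strings.TrimSuffix(subnet, "0/24") + "1" // x.y.z.1 gateway
		out, err := exec.Command("docker", "network", "create",
			"--driver=bridge", "--subnet="+subnet, "--gateway="+gw, "demo-net").CombinedOutput()
		if err != nil && strings.Contains(string(out), "Pool overlaps") {
			continue // subnet taken, try the next block
		} else if err != nil {
			panic(string(out))
		}
		fmt.Println("created demo-net on", subnet)
		break
	}
}
]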
I0401 04:44:35.914973 23509 kic.go:121] calculated static IP "192.168.76.2" for the "skaffold-475000" container
I0401 04:44:35.915088 23509 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0401 04:44:35.963882 23509 cli_runner.go:164] Run: docker volume create skaffold-475000 --label name.minikube.sigs.k8s.io=skaffold-475000 --label created_by.minikube.sigs.k8s.io=true
I0401 04:44:36.015498 23509 oci.go:103] Successfully created a docker volume skaffold-475000
I0401 04:44:36.015628 23509 cli_runner.go:164] Run: docker run --rm --name skaffold-475000-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=skaffold-475000 --entrypoint /usr/bin/test -v skaffold-475000:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 -d /var/lib
I0401 04:44:36.376535 23509 oci.go:107] Successfully prepared a docker volume skaffold-475000
I0401 04:44:36.376573 23509 preload.go:132] Checking if preload exists for k8s version v1.29.3 and runtime docker
I0401 04:44:36.376587 23509 kic.go:194] Starting extracting preloaded images to volume ...
I0401 04:44:36.376675 23509 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /Users/jenkins/minikube-integration/18551-12424/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v skaffold-475000:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 -I lz4 -xf /preloaded.tar -C /extractDir
I0401 04:44:38.557949 23509 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /Users/jenkins/minikube-integration/18551-12424/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.29.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v skaffold-475000:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 -I lz4 -xf /preloaded.tar -C /extractDir: (2.181166625s)
I0401 04:44:38.557972 23509 kic.go:203] duration metric: took 2.181376738s to extract preloaded images to volume ...
I0401 04:44:38.558081 23509 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0401 04:44:38.658060 23509 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname skaffold-475000 --name skaffold-475000 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=skaffold-475000 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=skaffold-475000 --network skaffold-475000 --ip 192.168.76.2 --volume skaffold-475000:/var --security-opt apparmor=unconfined --memory=2600mb --memory-swap=2600mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82
I0401 04:44:38.923168 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Running}}
I0401 04:44:38.975880 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:39.028673 23509 cli_runner.go:164] Run: docker exec skaffold-475000 stat /var/lib/dpkg/alternatives/iptables
I0401 04:44:39.194899 23509 oci.go:144] the created container "skaffold-475000" has a running status.
I0401 04:44:39.194960 23509 kic.go:225] Creating ssh key for kic: /Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa...
I0401 04:44:39.274866 23509 kic_runner.go:191] docker (temp): /Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0401 04:44:39.338389 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:39.391853 23509 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0401 04:44:39.391878 23509 kic_runner.go:114] Args: [docker exec --privileged skaffold-475000 chown docker:docker /home/docker/.ssh/authorized_keys]
I0401 04:44:39.498701 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:39.548849 23509 machine.go:94] provisionDockerMachine start ...
I0401 04:44:39.548969 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:39.599721 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:39.599935 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:39.599942 23509 main.go:141] libmachine: About to run SSH command:
hostname
I0401 04:44:39.716391 23509 main.go:141] libmachine: SSH cmd err, output: <nil>: skaffold-475000
I0401 04:44:39.716417 23509 ubuntu.go:169] provisioning hostname "skaffold-475000"
I0401 04:44:39.716495 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:39.765987 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:39.766162 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:39.766170 23509 main.go:141] libmachine: About to run SSH command:
sudo hostname skaffold-475000 && echo "skaffold-475000" | sudo tee /etc/hostname
I0401 04:44:39.908015 23509 main.go:141] libmachine: SSH cmd err, output: <nil>: skaffold-475000
I0401 04:44:39.908104 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:39.958621 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:39.958794 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:39.958806 23509 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sskaffold-475000' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 skaffold-475000/g' /etc/hosts;
else
echo '127.0.1.1 skaffold-475000' | sudo tee -a /etc/hosts;
fi
fi
I0401 04:44:40.077771 23509 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0401 04:44:40.077789 23509 ubuntu.go:175] set auth options {CertDir:/Users/jenkins/minikube-integration/18551-12424/.minikube CaCertPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem CaPrivateKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/server.pem ServerKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/server-key.pem ClientKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/Users/jenkins/minikube-integration/18551-12424/.minikube}
I0401 04:44:40.077809 23509 ubuntu.go:177] setting up certificates
I0401 04:44:40.077817 23509 provision.go:84] configureAuth start
I0401 04:44:40.077885 23509 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" skaffold-475000
I0401 04:44:40.128629 23509 provision.go:143] copyHostCerts
I0401 04:44:40.128713 23509 exec_runner.go:144] found /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.pem, removing ...
I0401 04:44:40.128721 23509 exec_runner.go:203] rm: /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.pem
I0401 04:44:40.128836 23509 exec_runner.go:151] cp: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem --> /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.pem (1082 bytes)
I0401 04:44:40.129070 23509 exec_runner.go:144] found /Users/jenkins/minikube-integration/18551-12424/.minikube/cert.pem, removing ...
I0401 04:44:40.129073 23509 exec_runner.go:203] rm: /Users/jenkins/minikube-integration/18551-12424/.minikube/cert.pem
I0401 04:44:40.129155 23509 exec_runner.go:151] cp: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/cert.pem --> /Users/jenkins/minikube-integration/18551-12424/.minikube/cert.pem (1123 bytes)
I0401 04:44:40.129331 23509 exec_runner.go:144] found /Users/jenkins/minikube-integration/18551-12424/.minikube/key.pem, removing ...
I0401 04:44:40.129334 23509 exec_runner.go:203] rm: /Users/jenkins/minikube-integration/18551-12424/.minikube/key.pem
I0401 04:44:40.129412 23509 exec_runner.go:151] cp: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/key.pem --> /Users/jenkins/minikube-integration/18551-12424/.minikube/key.pem (1679 bytes)
I0401 04:44:40.129558 23509 provision.go:117] generating server cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/machines/server.pem ca-key=/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem private-key=/Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca-key.pem org=jenkins.skaffold-475000 san=[127.0.0.1 192.168.76.2 localhost minikube skaffold-475000]
I0401 04:44:40.210924 23509 provision.go:177] copyRemoteCerts
I0401 04:44:40.210971 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0401 04:44:40.211022 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:40.261077 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:40.358724 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0401 04:44:40.398088 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0401 04:44:40.438615 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0401 04:44:40.478220 23509 provision.go:87] duration metric: took 400.386785ms to configureAuth
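[Note: configureAuth above generates a server certificate whose SANs cover the loopback address, the container IP, and the cluster hostnames. An illustrative crypto/x509 equivalent, self-signed here for brevity where minikube signs with its CA key instead:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.skaffold-475000"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // CertExpiration from the cluster config above
		// SANs matching the log line: 127.0.0.1 192.168.76.2 localhost minikube skaffold-475000
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
		DNSNames:    []string{"localhost", "minikube", "skaffold-475000"},
		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("DER-encoded server cert:", len(der), "bytes")
}
]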
I0401 04:44:40.478230 23509 ubuntu.go:193] setting minikube options for container-runtime
I0401 04:44:40.478385 23509 config.go:182] Loaded profile config "skaffold-475000": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.29.3
I0401 04:44:40.478449 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:40.529545 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:40.529713 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:40.529741 23509 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0401 04:44:40.646904 23509 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0401 04:44:40.646914 23509 ubuntu.go:71] root file system type: overlay
I0401 04:44:40.646997 23509 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0401 04:44:40.647086 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:40.697197 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:40.697362 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:40.697406 23509 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0401 04:44:40.839086 23509 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0401 04:44:40.839189 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:40.889277 23509 main.go:141] libmachine: Using SSH client type: native
I0401 04:44:40.889451 23509 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x1fdfb40] 0x1fe28a0 <nil> [] 0s} 127.0.0.1 60988 <nil> <nil>}
I0401 04:44:40.889460 23509 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0401 04:44:41.488964 23509 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-03-20 15:16:16.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-04-01 11:44:40.834646201 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
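The SSH one-liner that produced the diff above is a compare-and-swap update: diff -u exits non-zero when the rendered unit differs from the installed one, and only then does the || { ... } branch move the new file into place and restart Docker, so an unchanged unit never triggers a daemon restart. The same logic, unrolled for readability with the paths from the log:
  if ! sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new; then
    sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service
    sudo systemctl -f daemon-reload
    sudo systemctl -f enable docker
    sudo systemctl -f restart docker
  fi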
I0401 04:44:41.488980 23509 machine.go:97] duration metric: took 1.940104337s to provisionDockerMachine
I0401 04:44:41.488988 23509 client.go:171] duration metric: took 5.864802402s to LocalClient.Create
I0401 04:44:41.489002 23509 start.go:167] duration metric: took 5.864854185s to libmachine.API.Create "skaffold-475000"
I0401 04:44:41.489011 23509 start.go:293] postStartSetup for "skaffold-475000" (driver="docker")
I0401 04:44:41.489016 23509 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0401 04:44:41.489085 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0401 04:44:41.489140 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:41.540794 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:41.634564 23509 ssh_runner.go:195] Run: cat /etc/os-release
I0401 04:44:41.638613 23509 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0401 04:44:41.638630 23509 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0401 04:44:41.638642 23509 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0401 04:44:41.638646 23509 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0401 04:44:41.638652 23509 filesync.go:126] Scanning /Users/jenkins/minikube-integration/18551-12424/.minikube/addons for local assets ...
I0401 04:44:41.638754 23509 filesync.go:126] Scanning /Users/jenkins/minikube-integration/18551-12424/.minikube/files for local assets ...
I0401 04:44:41.638927 23509 filesync.go:149] local asset: /Users/jenkins/minikube-integration/18551-12424/.minikube/files/etc/ssl/certs/129072.pem -> 129072.pem in /etc/ssl/certs
I0401 04:44:41.639133 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0401 04:44:41.653692 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/files/etc/ssl/certs/129072.pem --> /etc/ssl/certs/129072.pem (1708 bytes)
I0401 04:44:41.693009 23509 start.go:296] duration metric: took 203.989504ms for postStartSetup
I0401 04:44:41.693556 23509 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" skaffold-475000
I0401 04:44:41.743651 23509 profile.go:143] Saving config to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/config.json ...
I0401 04:44:41.744100 23509 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0401 04:44:41.744151 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:41.794707 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:41.878774 23509 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0401 04:44:41.883957 23509 start.go:128] duration metric: took 6.302501094s to createHost
I0401 04:44:41.883969 23509 start.go:83] releasing machines lock for "skaffold-475000", held for 6.302620515s
I0401 04:44:41.884048 23509 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" skaffold-475000
I0401 04:44:41.933850 23509 ssh_runner.go:195] Run: cat /version.json
I0401 04:44:41.933878 23509 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0401 04:44:41.933908 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:41.933959 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:41.985988 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:41.986214 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:42.159971 23509 ssh_runner.go:195] Run: systemctl --version
I0401 04:44:42.166501 23509 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0401 04:44:42.171700 23509 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0401 04:44:42.214519 23509 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0401 04:44:42.214575 23509 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0401 04:44:42.258658 23509 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
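The two find runs above patch CNI configs in place: the first injects a "name": "loopback" field where it is missing and pins cniVersion to 1.0.0 in any loopback config, the second renames bridge and podman configs to *.mk_disabled so they stop shadowing minikube's own bridge CNI. A hedged, unrolled sketch of the disable step:
  sudo find /etc/cni/net.d -maxdepth 1 -type f \
    \( \( -name '*bridge*' -o -name '*podman*' \) -a -not -name '*.mk_disabled' \) \
    -exec sh -c 'sudo mv "$1" "$1.mk_disabled"' _ {} \;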
I0401 04:44:42.258675 23509 start.go:494] detecting cgroup driver to use...
I0401 04:44:42.258686 23509 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0401 04:44:42.258781 23509 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0401 04:44:42.285877 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I0401 04:44:42.301479 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0401 04:44:42.317316 23509 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0401 04:44:42.317382 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0401 04:44:42.333178 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0401 04:44:42.348839 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0401 04:44:42.363933 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0401 04:44:42.379689 23509 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0401 04:44:42.394712 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0401 04:44:42.410091 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0401 04:44:42.425762 23509 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0401 04:44:42.441328 23509 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0401 04:44:42.456664 23509 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0401 04:44:42.470760 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:42.526855 23509 ssh_runner.go:195] Run: sudo systemctl restart containerd
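The preceding sed runs normalize /etc/containerd/config.toml to match the "cgroupfs" driver detected on the host: SystemdCgroup is forced to false, the legacy io.containerd.runtime.v1.linux and runc.v1 shims are mapped to io.containerd.runc.v2, the pause image and CNI conf_dir are pinned, and unprivileged ports are enabled, after which containerd is reloaded and restarted. The effective setting can be confirmed afterwards (hedged; containerd config dump prints the merged configuration):
  containerd config dump | grep SystemdCgroup
  # expected on this node: SystemdCgroup = false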
I0401 04:44:42.608008 23509 start.go:494] detecting cgroup driver to use...
I0401 04:44:42.608022 23509 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0401 04:44:42.608084 23509 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0401 04:44:42.629873 23509 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0401 04:44:42.629935 23509 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0401 04:44:42.648512 23509 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0401 04:44:42.678194 23509 ssh_runner.go:195] Run: which cri-dockerd
I0401 04:44:42.682839 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0401 04:44:42.698480 23509 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
I0401 04:44:42.727540 23509 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0401 04:44:42.800687 23509 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0401 04:44:42.859572 23509 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0401 04:44:42.859661 23509 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0401 04:44:42.911007 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:42.969902 23509 ssh_runner.go:195] Run: sudo systemctl restart docker
I0401 04:44:43.217143 23509 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0401 04:44:43.234421 23509 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0401 04:44:43.251512 23509 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0401 04:44:43.315279 23509 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0401 04:44:43.372031 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:43.428580 23509 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0401 04:44:43.456633 23509 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0401 04:44:43.473811 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:43.534848 23509 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0401 04:44:43.616853 23509 start.go:541] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0401 04:44:43.616934 23509 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0401 04:44:43.622771 23509 start.go:562] Will wait 60s for crictl version
I0401 04:44:43.622814 23509 ssh_runner.go:195] Run: which crictl
I0401 04:44:43.626814 23509 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0401 04:44:43.675621 23509 start.go:578] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 26.0.0
RuntimeApiVersion: v1
I0401 04:44:43.675689 23509 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0401 04:44:43.698338 23509 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0401 04:44:43.770402 23509 out.go:204] * Preparing Kubernetes v1.29.3 on Docker 26.0.0 ...
I0401 04:44:43.770554 23509 cli_runner.go:164] Run: docker exec -t skaffold-475000 dig +short host.docker.internal
I0401 04:44:43.874575 23509 network.go:96] got host ip for mount in container by digging dns: 192.168.65.254
I0401 04:44:43.874664 23509 ssh_runner.go:195] Run: grep 192.168.65.254 host.minikube.internal$ /etc/hosts
I0401 04:44:43.879044 23509 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.65.254 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
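The hosts update above goes through a temp file deliberately: redirecting grep -v ... /etc/hosts straight back into /etc/hosts would truncate the file before grep finishes reading it, so the filtered content plus the fresh host.minikube.internal mapping are written to /tmp/h.$$ first and then copied into place with sudo. The same steps, split out (IP as dug from host.docker.internal above):
  { grep -v $'\thost.minikube.internal$' /etc/hosts
    echo "192.168.65.254 host.minikube.internal"
  } > /tmp/h.$$
  sudo cp /tmp/h.$$ /etc/hosts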
I0401 04:44:43.895774 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:43.946467 23509 kubeadm.go:877] updating cluster {Name:skaffold-475000 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 Memory:2600 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.29.3 ClusterName:skaffold-475000 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0401 04:44:43.946551 23509 preload.go:132] Checking if preload exists for k8s version v1.29.3 and runtime docker
I0401 04:44:43.946621 23509 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0401 04:44:43.964967 23509 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.29.3
registry.k8s.io/kube-controller-manager:v1.29.3
registry.k8s.io/kube-scheduler:v1.29.3
registry.k8s.io/kube-proxy:v1.29.3
registry.k8s.io/etcd:3.5.12-0
registry.k8s.io/coredns/coredns:v1.11.1
registry.k8s.io/pause:3.9
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0401 04:44:43.964978 23509 docker.go:615] Images already preloaded, skipping extraction
I0401 04:44:43.965070 23509 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0401 04:44:43.984179 23509 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.29.3
registry.k8s.io/kube-controller-manager:v1.29.3
registry.k8s.io/kube-scheduler:v1.29.3
registry.k8s.io/kube-proxy:v1.29.3
registry.k8s.io/etcd:3.5.12-0
registry.k8s.io/coredns/coredns:v1.11.1
registry.k8s.io/pause:3.9
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0401 04:44:43.984198 23509 cache_images.go:84] Images are preloaded, skipping loading
I0401 04:44:43.984212 23509 kubeadm.go:928] updating node { 192.168.76.2 8443 v1.29.3 docker true true} ...
I0401 04:44:43.984318 23509 kubeadm.go:940] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.29.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=skaffold-475000 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.29.3 ClusterName:skaffold-475000 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
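The rendered kubelet drop-in uses the same ExecStart-reset pattern as the Docker unit above, overriding the packaged ExecStart with minikube's pinned v1.29.3 kubelet binary and per-node flags. Once installed under /etc/systemd/system/kubelet.service.d/, the merged result (base unit plus drop-ins) can be inspected on the node with:
  sudo systemctl cat kubelet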
I0401 04:44:43.984418 23509 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0401 04:44:44.025986 23509 cni.go:84] Creating CNI manager for ""
I0401 04:44:44.026000 23509 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0401 04:44:44.026009 23509 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0401 04:44:44.026022 23509 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.29.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:skaffold-475000 NodeName:skaffold-475000 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0401 04:44:44.026130 23509 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "skaffold-475000"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.29.3
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
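The generated kubeadm.yaml stacks four independent YAML documents separated by ---: InitConfiguration (node registration, API endpoint, CRI socket), ClusterConfiguration (cluster-wide component flags, cert dir, networking), KubeletConfiguration, and KubeProxyConfiguration. A file like this can be sanity-checked before init is attempted (hedged; the subcommand ships with recent kubeadm releases, v1.26 and later):
  sudo kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml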
I0401 04:44:44.026202 23509 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.29.3
I0401 04:44:44.040653 23509 binaries.go:44] Found k8s binaries, skipping transfer
I0401 04:44:44.040708 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0401 04:44:44.054906 23509 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (314 bytes)
I0401 04:44:44.083329 23509 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0401 04:44:44.111512 23509 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2157 bytes)
I0401 04:44:44.139689 23509 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0401 04:44:44.143845 23509 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0401 04:44:44.160869 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:44.221026 23509 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0401 04:44:44.251944 23509 certs.go:68] Setting up /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000 for IP: 192.168.76.2
I0401 04:44:44.251953 23509 certs.go:194] generating shared ca certs ...
I0401 04:44:44.251963 23509 certs.go:226] acquiring lock for ca certs: {Name:mk6ce92a155e1601f71ffd10a4b11c7f561f5b2b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:44.252148 23509 certs.go:235] skipping valid "minikubeCA" ca cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.key
I0401 04:44:44.252213 23509 certs.go:235] skipping valid "proxyClientCA" ca cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/proxy-client-ca.key
I0401 04:44:44.252223 23509 certs.go:256] generating profile certs ...
I0401 04:44:44.252270 23509 certs.go:363] generating signed profile cert for "minikube-user": /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.key
I0401 04:44:44.252279 23509 crypto.go:68] Generating cert /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.crt with IP's: []
I0401 04:44:44.536339 23509 crypto.go:156] Writing cert to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.crt ...
I0401 04:44:44.536350 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.crt: {Name:mk488308933a66b4aa9c495dc55fd0ad0c176fa1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:44.536687 23509 crypto.go:164] Writing key to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.key ...
I0401 04:44:44.536693 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/client.key: {Name:mkca15421dc6aaa1f808e803ec2f4441071220ce Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:44.536916 23509 certs.go:363] generating signed profile cert for "minikube": /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key.3c283986
I0401 04:44:44.536931 23509 crypto.go:68] Generating cert /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt.3c283986 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0401 04:44:44.766698 23509 crypto.go:156] Writing cert to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt.3c283986 ...
I0401 04:44:44.766708 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt.3c283986: {Name:mkbd31a62fc0566c78cbba1c144ee60bc93ce535 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:44.767036 23509 crypto.go:164] Writing key to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key.3c283986 ...
I0401 04:44:44.767042 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key.3c283986: {Name:mk5d3c15aba8dba97b6021fcec5893f219051904 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:44.767265 23509 certs.go:381] copying /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt.3c283986 -> /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt
I0401 04:44:44.767454 23509 certs.go:385] copying /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key.3c283986 -> /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key
I0401 04:44:44.767618 23509 certs.go:363] generating signed profile cert for "aggregator": /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.key
I0401 04:44:44.767632 23509 crypto.go:68] Generating cert /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.crt with IP's: []
I0401 04:44:45.089059 23509 crypto.go:156] Writing cert to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.crt ...
I0401 04:44:45.089079 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.crt: {Name:mk48fb14dd98cc3290d030766622c79f42b7a72a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:45.089360 23509 crypto.go:164] Writing key to /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.key ...
I0401 04:44:45.089367 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.key: {Name:mke740a626c876372836b3cc346ba03b9aaf63b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:45.089761 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/12907.pem (1338 bytes)
W0401 04:44:45.089810 23509 certs.go:480] ignoring /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/12907_empty.pem, impossibly tiny 0 bytes
I0401 04:44:45.089816 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca-key.pem (1675 bytes)
I0401 04:44:45.089847 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/ca.pem (1082 bytes)
I0401 04:44:45.089875 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/cert.pem (1123 bytes)
I0401 04:44:45.089900 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/key.pem (1679 bytes)
I0401 04:44:45.089957 23509 certs.go:484] found cert: /Users/jenkins/minikube-integration/18551-12424/.minikube/files/etc/ssl/certs/129072.pem (1708 bytes)
I0401 04:44:45.090434 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0401 04:44:45.131839 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0401 04:44:45.172914 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0401 04:44:45.212194 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0401 04:44:45.252677 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0401 04:44:45.292044 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0401 04:44:45.331862 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0401 04:44:45.371676 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/profiles/skaffold-475000/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0401 04:44:45.410684 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/files/etc/ssl/certs/129072.pem --> /usr/share/ca-certificates/129072.pem (1708 bytes)
I0401 04:44:45.450695 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0401 04:44:45.491316 23509 ssh_runner.go:362] scp /Users/jenkins/minikube-integration/18551-12424/.minikube/certs/12907.pem --> /usr/share/ca-certificates/12907.pem (1338 bytes)
I0401 04:44:45.530679 23509 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (752 bytes)
I0401 04:44:45.558905 23509 ssh_runner.go:195] Run: openssl version
I0401 04:44:45.564331 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0401 04:44:45.579595 23509 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0401 04:44:45.583848 23509 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Apr 1 11:11 /usr/share/ca-certificates/minikubeCA.pem
I0401 04:44:45.583922 23509 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0401 04:44:45.590870 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0401 04:44:45.606783 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12907.pem && ln -fs /usr/share/ca-certificates/12907.pem /etc/ssl/certs/12907.pem"
I0401 04:44:45.622229 23509 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12907.pem
I0401 04:44:45.626361 23509 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Apr 1 11:15 /usr/share/ca-certificates/12907.pem
I0401 04:44:45.626401 23509 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12907.pem
I0401 04:44:45.632896 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12907.pem /etc/ssl/certs/51391683.0"
I0401 04:44:45.648169 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/129072.pem && ln -fs /usr/share/ca-certificates/129072.pem /etc/ssl/certs/129072.pem"
I0401 04:44:45.663599 23509 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/129072.pem
I0401 04:44:45.667573 23509 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Apr 1 11:15 /usr/share/ca-certificates/129072.pem
I0401 04:44:45.667615 23509 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/129072.pem
I0401 04:44:45.673974 23509 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/129072.pem /etc/ssl/certs/3ec20f2e.0"
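Each openssl x509 -hash plus ln -fs pair above maintains OpenSSL's hashed-name lookup directory: a CA under /etc/ssl/certs must be reachable as <subject-hash>.0 for verification to find it, which is where the b5213941.0, 51391683.0 and 3ec20f2e.0 link names come from. The generic recipe, using the minikube CA as the example:
  hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"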
I0401 04:44:45.689620 23509 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0401 04:44:45.693519 23509 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0401 04:44:45.693561 23509 kubeadm.go:391] StartCluster: {Name:skaffold-475000 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.43-1711559786-18485@sha256:2dcab64da240d825290a528fa79ad3c32db45fe5f8be5150468234a7114eff82 Memory:2600 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.29.3 ClusterName:skaffold-475000 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0401 04:44:45.693647 23509 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0401 04:44:45.711442 23509 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0401 04:44:45.726171 23509 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0401 04:44:45.740713 23509 kubeadm.go:213] ignoring SystemVerification for kubeadm because of docker driver
I0401 04:44:45.740767 23509 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0401 04:44:45.755431 23509 kubeadm.go:154] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0401 04:44:45.755439 23509 kubeadm.go:156] found existing configuration files:
I0401 04:44:45.755494 23509 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0401 04:44:45.770024 23509 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0401 04:44:45.770078 23509 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0401 04:44:45.784498 23509 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0401 04:44:45.798863 23509 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0401 04:44:45.798911 23509 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0401 04:44:45.813270 23509 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0401 04:44:45.827831 23509 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0401 04:44:45.827880 23509 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0401 04:44:45.842615 23509 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0401 04:44:45.857424 23509 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0401 04:44:45.857482 23509 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0401 04:44:45.872761 23509 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.29.3:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0401 04:44:45.957185 23509 kubeadm.go:309] [WARNING Swap]: swap is supported for cgroup v2 only; the NodeSwap feature gate of the kubelet is beta but disabled by default
I0401 04:44:46.028111 23509 kubeadm.go:309] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0401 04:44:55.129940 23509 kubeadm.go:309] [init] Using Kubernetes version: v1.29.3
I0401 04:44:55.129984 23509 kubeadm.go:309] [preflight] Running pre-flight checks
I0401 04:44:55.130047 23509 kubeadm.go:309] [preflight] Pulling images required for setting up a Kubernetes cluster
I0401 04:44:55.130143 23509 kubeadm.go:309] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0401 04:44:55.130236 23509 kubeadm.go:309] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0401 04:44:55.130286 23509 kubeadm.go:309] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0401 04:44:55.151205 23509 out.go:204] - Generating certificates and keys ...
I0401 04:44:55.151281 23509 kubeadm.go:309] [certs] Using existing ca certificate authority
I0401 04:44:55.151350 23509 kubeadm.go:309] [certs] Using existing apiserver certificate and key on disk
I0401 04:44:55.151435 23509 kubeadm.go:309] [certs] Generating "apiserver-kubelet-client" certificate and key
I0401 04:44:55.151493 23509 kubeadm.go:309] [certs] Generating "front-proxy-ca" certificate and key
I0401 04:44:55.151558 23509 kubeadm.go:309] [certs] Generating "front-proxy-client" certificate and key
I0401 04:44:55.151621 23509 kubeadm.go:309] [certs] Generating "etcd/ca" certificate and key
I0401 04:44:55.151686 23509 kubeadm.go:309] [certs] Generating "etcd/server" certificate and key
I0401 04:44:55.151810 23509 kubeadm.go:309] [certs] etcd/server serving cert is signed for DNS names [localhost skaffold-475000] and IPs [192.168.76.2 127.0.0.1 ::1]
I0401 04:44:55.151870 23509 kubeadm.go:309] [certs] Generating "etcd/peer" certificate and key
I0401 04:44:55.152007 23509 kubeadm.go:309] [certs] etcd/peer serving cert is signed for DNS names [localhost skaffold-475000] and IPs [192.168.76.2 127.0.0.1 ::1]
I0401 04:44:55.152076 23509 kubeadm.go:309] [certs] Generating "etcd/healthcheck-client" certificate and key
I0401 04:44:55.152149 23509 kubeadm.go:309] [certs] Generating "apiserver-etcd-client" certificate and key
I0401 04:44:55.152200 23509 kubeadm.go:309] [certs] Generating "sa" key and public key
I0401 04:44:55.152257 23509 kubeadm.go:309] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0401 04:44:55.152311 23509 kubeadm.go:309] [kubeconfig] Writing "admin.conf" kubeconfig file
I0401 04:44:55.152378 23509 kubeadm.go:309] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0401 04:44:55.152440 23509 kubeadm.go:309] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0401 04:44:55.152502 23509 kubeadm.go:309] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0401 04:44:55.152578 23509 kubeadm.go:309] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0401 04:44:55.152666 23509 kubeadm.go:309] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0401 04:44:55.152737 23509 kubeadm.go:309] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0401 04:44:55.194569 23509 out.go:204] - Booting up control plane ...
I0401 04:44:55.194739 23509 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0401 04:44:55.194852 23509 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0401 04:44:55.194951 23509 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0401 04:44:55.195142 23509 kubeadm.go:309] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0401 04:44:55.195271 23509 kubeadm.go:309] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0401 04:44:55.195349 23509 kubeadm.go:309] [kubelet-start] Starting the kubelet
I0401 04:44:55.195648 23509 kubeadm.go:309] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0401 04:44:55.195730 23509 kubeadm.go:309] [apiclient] All control plane components are healthy after 5.002639 seconds
I0401 04:44:55.195848 23509 kubeadm.go:309] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0401 04:44:55.195983 23509 kubeadm.go:309] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0401 04:44:55.196041 23509 kubeadm.go:309] [upload-certs] Skipping phase. Please see --upload-certs
I0401 04:44:55.196240 23509 kubeadm.go:309] [mark-control-plane] Marking the node skaffold-475000 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0401 04:44:55.196316 23509 kubeadm.go:309] [bootstrap-token] Using token: plgdsy.3rvx1s8ul2lytdeg
I0401 04:44:55.217520 23509 out.go:204] - Configuring RBAC rules ...
I0401 04:44:55.217685 23509 kubeadm.go:309] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0401 04:44:55.217836 23509 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0401 04:44:55.218079 23509 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0401 04:44:55.218289 23509 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0401 04:44:55.218483 23509 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0401 04:44:55.218636 23509 kubeadm.go:309] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0401 04:44:55.218920 23509 kubeadm.go:309] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0401 04:44:55.218970 23509 kubeadm.go:309] [addons] Applied essential addon: CoreDNS
I0401 04:44:55.219055 23509 kubeadm.go:309] [addons] Applied essential addon: kube-proxy
I0401 04:44:55.219066 23509 kubeadm.go:309]
I0401 04:44:55.219171 23509 kubeadm.go:309] Your Kubernetes control-plane has initialized successfully!
I0401 04:44:55.219175 23509 kubeadm.go:309]
I0401 04:44:55.219301 23509 kubeadm.go:309] To start using your cluster, you need to run the following as a regular user:
I0401 04:44:55.219306 23509 kubeadm.go:309]
I0401 04:44:55.219340 23509 kubeadm.go:309] mkdir -p $HOME/.kube
I0401 04:44:55.219433 23509 kubeadm.go:309] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0401 04:44:55.219499 23509 kubeadm.go:309] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0401 04:44:55.219511 23509 kubeadm.go:309]
I0401 04:44:55.219594 23509 kubeadm.go:309] Alternatively, if you are the root user, you can run:
I0401 04:44:55.219602 23509 kubeadm.go:309]
I0401 04:44:55.219652 23509 kubeadm.go:309] export KUBECONFIG=/etc/kubernetes/admin.conf
I0401 04:44:55.219657 23509 kubeadm.go:309]
I0401 04:44:55.219708 23509 kubeadm.go:309] You should now deploy a pod network to the cluster.
I0401 04:44:55.219791 23509 kubeadm.go:309] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0401 04:44:55.219866 23509 kubeadm.go:309] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0401 04:44:55.219870 23509 kubeadm.go:309]
I0401 04:44:55.219950 23509 kubeadm.go:309] You can now join any number of control-plane nodes by copying certificate authorities
I0401 04:44:55.220027 23509 kubeadm.go:309] and service account keys on each node and then running the following as root:
I0401 04:44:55.220032 23509 kubeadm.go:309]
I0401 04:44:55.220117 23509 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token plgdsy.3rvx1s8ul2lytdeg \
I0401 04:44:55.220236 23509 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:a40c0a3af7e9ed525e9c5743189ce621ec8f67d9676f02a9f5fe8931d120020f \
I0401 04:44:55.220259 23509 kubeadm.go:309] --control-plane
I0401 04:44:55.220262 23509 kubeadm.go:309]
I0401 04:44:55.220351 23509 kubeadm.go:309] Then you can join any number of worker nodes by running the following on each as root:
I0401 04:44:55.220360 23509 kubeadm.go:309]
I0401 04:44:55.220432 23509 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token plgdsy.3rvx1s8ul2lytdeg \
I0401 04:44:55.220542 23509 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:a40c0a3af7e9ed525e9c5743189ce621ec8f67d9676f02a9f5fe8931d120020f
I0401 04:44:55.220554 23509 cni.go:84] Creating CNI manager for ""
I0401 04:44:55.220564 23509 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0401 04:44:55.280559 23509 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0401 04:44:55.301631 23509 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0401 04:44:55.317726 23509 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (457 bytes)
I0401 04:44:55.346633 23509 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0401 04:44:55.346701 23509 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.29.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0401 04:44:55.346722 23509 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.29.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes skaffold-475000 minikube.k8s.io/updated_at=2024_04_01T04_44_55_0700 minikube.k8s.io/version=v1.33.0-beta.0 minikube.k8s.io/commit=b8aa0d860b7e6047018bc1a9124397cd2c931e0d minikube.k8s.io/name=skaffold-475000 minikube.k8s.io/primary=true
I0401 04:44:55.354925 23509 ops.go:34] apiserver oom_adj: -16
I0401 04:44:55.434340 23509 kubeadm.go:1107] duration metric: took 87.687466ms to wait for elevateKubeSystemPrivileges
W0401 04:44:55.434368 23509 kubeadm.go:286] apiserver tunnel failed: apiserver port not set
I0401 04:44:55.434373 23509 kubeadm.go:393] duration metric: took 9.740772431s to StartCluster
I0401 04:44:55.434384 23509 settings.go:142] acquiring lock: {Name:mk8d68f384d7be74f8d51d392d82f93c1ea7737c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:55.434480 23509 settings.go:150] Updating kubeconfig: /Users/jenkins/minikube-integration/18551-12424/kubeconfig
I0401 04:44:55.435012 23509 lock.go:35] WriteFile acquiring /Users/jenkins/minikube-integration/18551-12424/kubeconfig: {Name:mk07f3710013070cfb665d15403926f3ecb34355 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0401 04:44:55.435269 23509 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.29.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0401 04:44:55.435288 23509 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.29.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I0401 04:44:55.458510 23509 out.go:177] * Verifying Kubernetes components...
I0401 04:44:55.435304 23509 addons.go:502] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volumesnapshots:false yakd:false]
I0401 04:44:55.458538 23509 addons.go:69] Setting storage-provisioner=true in profile "skaffold-475000"
I0401 04:44:55.435448 23509 config.go:182] Loaded profile config "skaffold-475000": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.29.3
I0401 04:44:55.458544 23509 addons.go:69] Setting default-storageclass=true in profile "skaffold-475000"
I0401 04:44:55.500708 23509 addons.go:234] Setting addon storage-provisioner=true in "skaffold-475000"
I0401 04:44:55.500729 23509 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0401 04:44:55.500733 23509 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "skaffold-475000"
I0401 04:44:55.500737 23509 host.go:66] Checking if "skaffold-475000" exists ...
I0401 04:44:55.501011 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:55.501102 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:55.564487 23509 addons.go:234] Setting addon default-storageclass=true in "skaffold-475000"
I0401 04:44:55.584595 23509 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0401 04:44:55.584653 23509 host.go:66] Checking if "skaffold-475000" exists ...
I0401 04:44:55.592401 23509 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.29.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.65.254 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.29.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0401 04:44:55.605714 23509 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0401 04:44:55.605719 23509 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0401 04:44:55.605789 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:55.606615 23509 cli_runner.go:164] Run: docker container inspect skaffold-475000 --format={{.State.Status}}
I0401 04:44:55.626961 23509 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0401 04:44:55.669674 23509 addons.go:426] installing /etc/kubernetes/addons/storageclass.yaml
I0401 04:44:55.669689 23509 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0401 04:44:55.669701 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:55.669767 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:55.729681 23509 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:60988 SSHKeyPath:/Users/jenkins/minikube-integration/18551-12424/.minikube/machines/skaffold-475000/id_rsa Username:docker}
I0401 04:44:55.811834 23509 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.29.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0401 04:44:55.819816 23509 start.go:946] {"host.minikube.internal": 192.168.65.254} host record injected into CoreDNS's ConfigMap
I0401 04:44:55.819940 23509 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" skaffold-475000
I0401 04:44:55.871521 23509 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.29.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0401 04:44:55.876751 23509 api_server.go:52] waiting for apiserver process to appear ...
I0401 04:44:55.876791 23509 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0401 04:44:56.143070 23509 api_server.go:72] duration metric: took 707.760263ms to wait for apiserver process to appear ...
I0401 04:44:56.143081 23509 api_server.go:88] waiting for apiserver healthz status ...
I0401 04:44:56.143096 23509 api_server.go:253] Checking apiserver healthz at https://127.0.0.1:60992/healthz ...
I0401 04:44:56.148246 23509 api_server.go:279] https://127.0.0.1:60992/healthz returned 200:
ok
I0401 04:44:56.171748 23509 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0401 04:44:56.149771 23509 api_server.go:141] control plane version: v1.29.3
I0401 04:44:56.213759 23509 api_server.go:131] duration metric: took 70.663961ms to wait for apiserver health ...
I0401 04:44:56.213786 23509 system_pods.go:43] waiting for kube-system pods to appear ...
I0401 04:44:56.213827 23509 addons.go:505] duration metric: took 778.495331ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0401 04:44:56.220987 23509 system_pods.go:59] 5 kube-system pods found
I0401 04:44:56.221002 23509 system_pods.go:61] "etcd-skaffold-475000" [5e144024-f93c-4db2-9096-e6bd970e70ae] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0401 04:44:56.221007 23509 system_pods.go:61] "kube-apiserver-skaffold-475000" [5e15f846-e3fa-43cc-837e-e6af02989ab0] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0401 04:44:56.221012 23509 system_pods.go:61] "kube-controller-manager-skaffold-475000" [e8e0d701-54c3-4a2f-a8ac-fd4713bbd10a] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0401 04:44:56.221018 23509 system_pods.go:61] "kube-scheduler-skaffold-475000" [aa8f8d07-ec3d-4cc7-8583-21401c83e10c] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0401 04:44:56.221020 23509 system_pods.go:61] "storage-provisioner" [731f8613-4c5d-447a-b8b2-e88114c0f069] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.)
I0401 04:44:56.221023 23509 system_pods.go:74] duration metric: took 7.233069ms to wait for pod list to return data ...
I0401 04:44:56.221029 23509 kubeadm.go:576] duration metric: took 785.723666ms to wait for: map[apiserver:true system_pods:true]
I0401 04:44:56.221037 23509 node_conditions.go:102] verifying NodePressure condition ...
I0401 04:44:56.223897 23509 node_conditions.go:122] node storage ephemeral capacity is 115273188Ki
I0401 04:44:56.223905 23509 node_conditions.go:123] node cpu capacity is 12
I0401 04:44:56.223916 23509 node_conditions.go:105] duration metric: took 2.876533ms to run NodePressure ...
I0401 04:44:56.223924 23509 start.go:240] waiting for startup goroutines ...
I0401 04:44:56.325826 23509 kapi.go:248] "coredns" deployment in "kube-system" namespace and "skaffold-475000" context rescaled to 1 replicas
I0401 04:44:56.325842 23509 start.go:245] waiting for cluster config update ...
I0401 04:44:56.325851 23509 start.go:254] writing updated cluster config ...
I0401 04:44:56.326183 23509 ssh_runner.go:195] Run: rm -f paused
I0401 04:44:56.367576 23509 start.go:600] kubectl: 1.29.1, cluster: 1.29.3 (minor skew: 0)
I0401 04:44:56.388872 23509 out.go:177] * Done! kubectl is now configured to use "skaffold-475000" cluster and "default" namespace by default
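A note on the CoreDNS rewrite above: at 04:44:55 minikube pipes the coredns ConfigMap through sed to splice a hosts block ahead of the forward directive, then logs that the host.minikube.internal record was injected. A minimal way to verify the result, assuming the skaffold-475000 context were still reachable, is to read the ConfigMap back; the injected fragment should match what the sed expression writes:

$ kubectl --context skaffold-475000 -n kube-system get configmap coredns -o yaml
# expected fragment inside the Corefile, per the sed expression above:
#    hosts {
#       192.168.65.254 host.minikube.internal
#       fallthrough
#    }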
==> Docker <==
Apr 01 11:44:43 skaffold-475000 dockerd[1114]: time="2024-04-01T11:44:43.212887932Z" level=info msg="API listen on [::]:2376"
Apr 01 11:44:43 skaffold-475000 systemd[1]: Started Docker Application Container Engine.
Apr 01 11:44:43 skaffold-475000 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Start docker client with request timeout 0s"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Hairpin mode is set to hairpin-veth"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Loaded network plugin cni"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Docker cri networking managed by network plugin cni"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Docker Info: &{ID:82ff08d0-5de0-4d96-bcce-36704614b85e Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:8 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:[] Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:[] Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6tables:true Debug:false NFd:25 OomKillDisable:false NGoroutines:41 SystemTime:2024-04-01T11:44:43.606001913Z LoggingDriver:json-file CgroupDriver:cgroupfs CgroupVersion:2 NEventsListener:0 KernelVersion:6.6.12-linuxkit OperatingSystem:Ubuntu 22.04.4 LTS OSVersion:22.04 OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:0xc000678070 NCPU:12 MemTotal:6213300224 GenericResources:[] DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy:control-plane.minikube.internal Name:skaffold-475000 Labels:[provider=docker] ExperimentalBuild:false ServerVersion:26.0.0 ClusterStore: ClusterAdvertise: Runtimes:map[io.containerd.runc.v2:{Path:runc Args:[] Shim:<nil>} runc:{Path:runc Args:[] Shim:<nil>}] DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:[] Nodes:0 Managers:0 Cluster:<nil> Warnings:[]} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: DefaultAddressPools:[] Warnings:[]}"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Setting cgroupDriver cgroupfs"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Apr 01 11:44:43 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:43Z" level=info msg="Start cri-dockerd grpc backend"
Apr 01 11:44:43 skaffold-475000 systemd[1]: Started CRI Interface for Docker Application Container Engine.
Apr 01 11:44:49 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/27c93f25b304c5b4840d7401e6a42b62e01042191f4bb2e7ddec5d58fa4698e0/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:44:49 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d6d02fdd35125077ea21a311d81a09e60e2785cfd72b9c1be1184df27fd7a642/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:44:49 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fc5d92602d1814cf354a86978430c3a3089d8bcd06960b84d9c27f63dc496e4e/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:44:49 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:44:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0302552ac7e7fe5e3de47df1429f73c065f2c6f8cab6860455f3d3244558db57/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:45:08 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:45:08Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/160091d7487cfeb61cc8d07313bbd9747ba0416e2ef7831d007c9b90c712c222/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:45:09 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:45:09Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/88a6e031411b87b67c76d69fcddcc88d7655ab68d3a9c82b90c3b6df690af0d8/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:45:09 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:45:09Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5868404cc74836aa478e916804437f5c4835f44fb3c41df224a25598842f4a08/resolv.conf as [nameserver 192.168.65.254 options ndots:0]"
Apr 01 11:45:15 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:45:15Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Apr 01 11:45:39 skaffold-475000 dockerd[1114]: time="2024-04-01T11:45:39.006097737Z" level=info msg="ignoring event" container=154c3a30f90ed3d092b293e62db5726fbda834016d788cb6495ac50bd30781c9 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Apr 01 11:49:28 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:49:28Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f4fb527c7889dcbe4339f16e36d145d182aff1e73b39523336e56b60392cafb8/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Apr 01 11:49:28 skaffold-475000 cri-dockerd[1341]: time="2024-04-01T11:49:28Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/30f6e32a21b2dedbca45b278ac9f81f987702505fab09ebf2769df90ec5bdecd/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
==> container status <==
CONTAINER       IMAGE           CREATED         STATE     NAME                      ATTEMPT   POD ID          POD
b862107073235   f9ab97cf2abc0   5 seconds ago   Running   leeroy-app                0         30f6e32a21b2d   leeroy-app-8477656689-hcjh4
19d8e7a1a14a7   7f81c2b93fe48   5 seconds ago   Running   leeroy-web                0         f4fb527c7889d   leeroy-web-5dfc684c7-64qc7
174faf47ff81d   6e38f40d628db   3 minutes ago   Running   storage-provisioner       1         160091d7487cf   storage-provisioner
1e10f27e74b66   a1d263b5dc5b0   4 minutes ago   Running   kube-proxy                0         5868404cc7483   kube-proxy-dtccv
a44ae4a6bd15a   cbb01a7bd410d   4 minutes ago   Running   coredns                   0         88a6e031411b8   coredns-76f75df574-4dsjr
154c3a30f90ed   6e38f40d628db   4 minutes ago   Exited    storage-provisioner       0         160091d7487cf   storage-provisioner
e8cbed046ac02   6052a25da3f97   4 minutes ago   Running   kube-controller-manager   0         d6d02fdd35125   kube-controller-manager-skaffold-475000
c43381f6ca3f1   8c390d98f50c0   4 minutes ago   Running   kube-scheduler            0         0302552ac7e7f   kube-scheduler-skaffold-475000
dd71cb5024361   3861cfcd7c04c   4 minutes ago   Running   etcd                      0         27c93f25b304c   etcd-skaffold-475000
5e080e3b56e54   39f995c9f1996   4 minutes ago   Running   kube-apiserver            0         fc5d92602d181   kube-apiserver-skaffold-475000
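Both leeroy containers show Running with ATTEMPT 0 about 5 seconds before this snapshot, so the workloads did come up; the pod wait in the test simply expired first. A quick manual re-check, assuming the profile had not yet been deleted, would be:

$ kubectl --context skaffold-475000 get pods -l app=leeroy-app -o wide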
==> coredns [a44ae4a6bd15] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = f869070685748660180df1b7a47d58cdafcf2f368266578c062d1151dc2c900964aecc5975e8882e6de6fdfb6460463e30ebfaad2ec8f0c3c6436f80225b3b5b
CoreDNS-1.11.1
linux/amd64, go1.20.7, ae2bbc2
[INFO] 127.0.0.1:51003 - 48802 "HINFO IN 7552216045849326227.5360608599372079098. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.024635913s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[322093261]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231 (01-Apr-2024 11:45:09.515) (total time: 30002ms):
Trace[322093261]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30002ms (11:45:39.517)
Trace[322093261]: [30.00232791s] [30.00232791s] END
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[515827201]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231 (01-Apr-2024 11:45:09.515) (total time: 30002ms):
Trace[515827201]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30001ms (11:45:39.517)
Trace[515827201]: [30.002267976s] [30.002267976s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[925363241]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231 (01-Apr-2024 11:45:09.515) (total time: 30003ms):
Trace[925363241]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30002ms (11:45:39.518)
Trace[925363241]: [30.003062726s] [30.003062726s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
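Every CoreDNS error above is a 30s dial timeout against the kubernetes Service VIP (10.96.0.1:443), which only answers once kube-proxy has programmed the service rules. CoreDNS began its list calls at 11:45:09.515, apparently just before those rules landed (the kube-proxy log below shows its caches syncing at 11:45:09.7), and no later errors appear in the captured log. Two hedged checks for this class of startup race, assuming the cluster is still up:

$ kubectl --context skaffold-475000 get endpoints kubernetes   # the VIP should map to 192.168.76.2:8443
$ kubectl --context skaffold-475000 -n kube-system logs kube-proxy-dtccv | head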
==> describe nodes <==
Name:               skaffold-475000
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=skaffold-475000
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=b8aa0d860b7e6047018bc1a9124397cd2c931e0d
                    minikube.k8s.io/name=skaffold-475000
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2024_04_01T04_44_55_0700
                    minikube.k8s.io/version=v1.33.0-beta.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Mon, 01 Apr 2024 11:44:52 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  skaffold-475000
  AcquireTime:     <unset>
  RenewTime:       Mon, 01 Apr 2024 11:49:30 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Mon, 01 Apr 2024 11:49:31 +0000   Mon, 01 Apr 2024 11:44:50 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Mon, 01 Apr 2024 11:49:31 +0000   Mon, 01 Apr 2024 11:44:50 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Mon, 01 Apr 2024 11:49:31 +0000   Mon, 01 Apr 2024 11:44:50 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Mon, 01 Apr 2024 11:49:31 +0000   Mon, 01 Apr 2024 11:44:52 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: skaffold-475000
Capacity:
cpu: 12
ephemeral-storage: 115273188Ki
hugepages-2Mi: 0
memory: 6067676Ki
pods: 110
Allocatable:
cpu: 12
ephemeral-storage: 115273188Ki
hugepages-2Mi: 0
memory: 6067676Ki
pods: 110
System Info:
Machine ID: bcb6d1fa2b114d7b829ee438f8c9bf8d
System UUID: bcb6d1fa2b114d7b829ee438f8c9bf8d
Boot ID: d4451ec4-d02d-4501-b2e4-20d356b89f0d
Kernel Version: 6.6.12-linuxkit
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://26.0.0
Kubelet Version: v1.29.3
Kube-Proxy Version: v1.29.3
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace                   Name                                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------                   ----                                       ------------  ----------  ---------------  -------------  ---
  default                     leeroy-app-8477656689-hcjh4                0 (0%)        0 (0%)      0 (0%)           0 (0%)         6s
  default                     leeroy-web-5dfc684c7-64qc7                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         6s
  kube-system                 coredns-76f75df574-4dsjr                   100m (0%)     0 (0%)      70Mi (1%)        170Mi (2%)     4m25s
  kube-system                 etcd-skaffold-475000                       100m (0%)     0 (0%)      100Mi (1%)       0 (0%)         4m38s
  kube-system                 kube-apiserver-skaffold-475000             250m (2%)     0 (0%)      0 (0%)           0 (0%)         4m38s
  kube-system                 kube-controller-manager-skaffold-475000    200m (1%)     0 (0%)      0 (0%)           0 (0%)         4m38s
  kube-system                 kube-proxy-dtccv                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m25s
  kube-system                 kube-scheduler-skaffold-475000             100m (0%)     0 (0%)      0 (0%)           0 (0%)         4m38s
  kube-system                 storage-provisioner                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m37s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                750m (6%)   0 (0%)
  memory             170Mi (2%)  170Mi (2%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age    From             Message
  ----    ------                   ----   ----             -------
  Normal  Starting                 4m24s  kube-proxy
  Normal  Starting                 4m39s  kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  4m38s  kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  4m38s  kubelet          Node skaffold-475000 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    4m38s  kubelet          Node skaffold-475000 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     4m38s  kubelet          Node skaffold-475000 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           4m26s  node-controller  Node skaffold-475000 event: Registered Node skaffold-475000 in Controller
==> dmesg <==
[Apr 1 10:29] netlink: 'init': attribute type 4 has an invalid length.
[ +0.030647] fakeowner: loading out-of-tree module taints kernel.
[ +0.000030] netlink: 'init': attribute type 22 has an invalid length.
[Apr 1 11:11] systemd[1517]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
==> etcd [dd71cb502436] <==
{"level":"info","ts":"2024-04-01T11:44:50.888688Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2024-04-01T11:44:50.889523Z","caller":"etcdserver/server.go:2578","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-04-01T11:44:50.889973Z","caller":"etcdserver/server.go:2068","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:skaffold-475000 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2024-04-01T11:44:50.890022Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-04-01T11:44:50.890065Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-04-01T11:44:50.890202Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2024-04-01T11:44:50.890369Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-04-01T11:44:50.890438Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-04-01T11:44:50.890447Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-04-01T11:44:50.891616Z","caller":"etcdserver/server.go:2602","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-04-01T11:44:50.894862Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2024-04-01T11:44:50.894921Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-04-01T11:45:08.437105Z","caller":"traceutil/trace.go:171","msg":"trace[1566797485] transaction","detail":"{read_only:false; response_revision:345; number_of_response:1; }","duration":"141.711862ms","start":"2024-04-01T11:45:08.295381Z","end":"2024-04-01T11:45:08.437093Z","steps":["trace[1566797485] 'process raft request' (duration: 137.584907ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:45:08.437493Z","caller":"traceutil/trace.go:171","msg":"trace[690448559] transaction","detail":"{read_only:false; response_revision:346; number_of_response:1; }","duration":"141.723447ms","start":"2024-04-01T11:45:08.295757Z","end":"2024-04-01T11:45:08.437481Z","steps":["trace[690448559] 'process raft request' (duration: 141.099853ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:45:08.437638Z","caller":"traceutil/trace.go:171","msg":"trace[167393662] transaction","detail":"{read_only:false; response_revision:347; number_of_response:1; }","duration":"108.072931ms","start":"2024-04-01T11:45:08.329554Z","end":"2024-04-01T11:45:08.437627Z","steps":["trace[167393662] 'process raft request' (duration: 107.360046ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:45:33.305811Z","caller":"traceutil/trace.go:171","msg":"trace[2114140485] transaction","detail":"{read_only:false; response_revision:380; number_of_response:1; }","duration":"105.691985ms","start":"2024-04-01T11:45:33.200107Z","end":"2024-04-01T11:45:33.305799Z","steps":["trace[2114140485] 'process raft request' (duration: 105.60483ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:48:38.361966Z","caller":"traceutil/trace.go:171","msg":"trace[306083363] transaction","detail":"{read_only:false; response_revision:537; number_of_response:1; }","duration":"176.119179ms","start":"2024-04-01T11:48:38.185835Z","end":"2024-04-01T11:48:38.361954Z","steps":["trace[306083363] 'process raft request' (duration: 176.012063ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:49:28.087348Z","caller":"traceutil/trace.go:171","msg":"trace[1477905571] linearizableReadLoop","detail":"{readStateIndex:657; appliedIndex:655; }","duration":"111.271692ms","start":"2024-04-01T11:49:27.976067Z","end":"2024-04-01T11:49:28.087338Z","steps":["trace[1477905571] 'read index received' (duration: 71.986861ms)","trace[1477905571] 'applied index is now lower than readState.Index' (duration: 39.284414ms)"],"step_count":2}
{"level":"info","ts":"2024-04-01T11:49:28.087349Z","caller":"traceutil/trace.go:171","msg":"trace[910959577] transaction","detail":"{read_only:false; response_revision:593; number_of_response:1; }","duration":"113.69592ms","start":"2024-04-01T11:49:27.973637Z","end":"2024-04-01T11:49:28.087333Z","steps":["trace[910959577] 'process raft request' (duration: 74.452998ms)","trace[910959577] 'compare' (duration: 39.06321ms)"],"step_count":2}
{"level":"info","ts":"2024-04-01T11:49:28.087539Z","caller":"traceutil/trace.go:171","msg":"trace[1586494357] transaction","detail":"{read_only:false; response_revision:595; number_of_response:1; }","duration":"111.317602ms","start":"2024-04-01T11:49:27.976202Z","end":"2024-04-01T11:49:28.08752Z","steps":["trace[1586494357] 'process raft request' (duration: 111.081176ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:49:28.08766Z","caller":"traceutil/trace.go:171","msg":"trace[432411270] transaction","detail":"{read_only:false; response_revision:597; number_of_response:1; }","duration":"110.993133ms","start":"2024-04-01T11:49:27.976647Z","end":"2024-04-01T11:49:28.08764Z","steps":["trace[432411270] 'process raft request' (duration: 110.671471ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:49:28.087752Z","caller":"traceutil/trace.go:171","msg":"trace[728581711] transaction","detail":"{read_only:false; response_revision:596; number_of_response:1; }","duration":"111.228963ms","start":"2024-04-01T11:49:27.976511Z","end":"2024-04-01T11:49:28.08774Z","steps":["trace[728581711] 'process raft request' (duration: 110.78952ms)"],"step_count":1}
{"level":"info","ts":"2024-04-01T11:49:28.087764Z","caller":"traceutil/trace.go:171","msg":"trace[1973709862] transaction","detail":"{read_only:false; response_revision:594; number_of_response:1; }","duration":"113.568878ms","start":"2024-04-01T11:49:27.974183Z","end":"2024-04-01T11:49:28.087752Z","steps":["trace[1973709862] 'process raft request' (duration: 113.071703ms)"],"step_count":1}
{"level":"warn","ts":"2024-04-01T11:49:28.087658Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"111.567542ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/deployments/default/\" range_end:\"/registry/deployments/default0\" ","response":"range_response_count:2 size:5827"}
{"level":"info","ts":"2024-04-01T11:49:28.088066Z","caller":"traceutil/trace.go:171","msg":"trace[1477564471] range","detail":"{range_begin:/registry/deployments/default/; range_end:/registry/deployments/default0; response_count:2; response_revision:597; }","duration":"112.004907ms","start":"2024-04-01T11:49:27.97605Z","end":"2024-04-01T11:49:28.088055Z","steps":["trace[1477564471] 'agreement among raft nodes before linearized reading' (duration: 111.559204ms)"],"step_count":1}
==> kernel <==
11:49:33 up 1:19, 0 users, load average: 6.44, 5.11, 4.69
Linux skaffold-475000 6.6.12-linuxkit #1 SMP PREEMPT_DYNAMIC Tue Jan 30 09:48:40 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [5e080e3b56e5] <==
I0401 11:44:52.121828 1 cache.go:39] Caches are synced for AvailableConditionController controller
I0401 11:44:52.121849 1 shared_informer.go:318] Caches are synced for crd-autoregister
I0401 11:44:52.121905 1 aggregator.go:165] initial CRD sync complete...
I0401 11:44:52.121912 1 autoregister_controller.go:141] Starting autoregister controller
I0401 11:44:52.121915 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0401 11:44:52.121919 1 cache.go:39] Caches are synced for autoregister controller
I0401 11:44:52.123091 1 controller.go:624] quota admission added evaluator for: namespaces
I0401 11:44:52.181997 1 shared_informer.go:318] Caches are synced for node_authorizer
I0401 11:44:52.188518 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I0401 11:44:53.023629 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0401 11:44:53.026510 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0401 11:44:53.026546 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0401 11:44:53.351903 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0401 11:44:53.376120 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0401 11:44:53.494972 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0401 11:44:53.499689 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I0401 11:44:53.500528 1 controller.go:624] quota admission added evaluator for: endpoints
I0401 11:44:53.504502 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0401 11:44:54.091040 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I0401 11:44:54.848844 1 controller.go:624] quota admission added evaluator for: deployments.apps
I0401 11:44:54.856576 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0401 11:44:54.866261 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I0401 11:45:07.899597 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I0401 11:45:08.187626 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
E0401 11:49:29.166059 1 watch.go:253] http2: stream closed
==> kube-controller-manager [e8cbed046ac0] <==
I0401 11:45:08.195394 1 event.go:376] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-dtccv"
I0401 11:45:08.293030 1 event.go:376] "Event occurred" object="kube-system/coredns-76f75df574" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-76f75df574-4dsjr"
I0401 11:45:08.438991 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="535.732351ms"
I0401 11:45:08.488849 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="49.722598ms"
I0401 11:45:08.488977 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="50.546µs"
I0401 11:45:08.489029 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="26.858µs"
I0401 11:45:10.315590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="706.425µs"
I0401 11:45:48.647908 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="5.904329ms"
I0401 11:45:48.648022 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-76f75df574" duration="44.916µs"
I0401 11:49:27.956963 1 event.go:376] "Event occurred" object="default/leeroy-web" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set leeroy-web-5dfc684c7 to 1"
I0401 11:49:27.961504 1 event.go:376] "Event occurred" object="default/leeroy-web-5dfc684c7" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: leeroy-web-5dfc684c7-64qc7"
I0401 11:49:27.966405 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="9.820368ms"
I0401 11:49:27.970864 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="4.376611ms"
I0401 11:49:27.970960 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="33.756µs"
I0401 11:49:27.975195 1 event.go:376] "Event occurred" object="default/leeroy-app" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set leeroy-app-8477656689 to 1"
I0401 11:49:28.089176 1 event.go:376] "Event occurred" object="default/leeroy-app-8477656689" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: leeroy-app-8477656689-hcjh4"
I0401 11:49:28.089256 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="45.239µs"
I0401 11:49:28.093685 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="118.838978ms"
I0401 11:49:28.098009 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="4.256543ms"
I0401 11:49:28.098161 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="54.61µs"
I0401 11:49:28.102846 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="45.717µs"
I0401 11:49:28.719633 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="3.326153ms"
I0401 11:49:28.719732 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-web-5dfc684c7" duration="25.292µs"
I0401 11:49:28.728812 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="4.164606ms"
I0401 11:49:28.728900 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/leeroy-app-8477656689" duration="20.855µs"
==> kube-proxy [1e10f27e74b6] <==
I0401 11:45:09.526184 1 server_others.go:72] "Using iptables proxy"
I0401 11:45:09.588069 1 server.go:1050] "Successfully retrieved node IP(s)" IPs=["192.168.76.2"]
I0401 11:45:09.613017 1 server.go:652] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0401 11:45:09.613059 1 server_others.go:168] "Using iptables Proxier"
I0401 11:45:09.615511 1 server_others.go:512] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I0401 11:45:09.615546 1 server_others.go:529] "Defaulting to no-op detect-local"
I0401 11:45:09.615566 1 proxier.go:245] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0401 11:45:09.615879 1 server.go:865] "Version info" version="v1.29.3"
I0401 11:45:09.615918 1 server.go:867] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0401 11:45:09.616817 1 config.go:188] "Starting service config controller"
I0401 11:45:09.616891 1 shared_informer.go:311] Waiting for caches to sync for service config
I0401 11:45:09.616934 1 config.go:97] "Starting endpoint slice config controller"
I0401 11:45:09.616940 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I0401 11:45:09.619270 1 config.go:315] "Starting node config controller"
I0401 11:45:09.619296 1 shared_informer.go:311] Waiting for caches to sync for node config
I0401 11:45:09.717405 1 shared_informer.go:318] Caches are synced for endpoint slice config
I0401 11:45:09.717452 1 shared_informer.go:318] Caches are synced for service config
I0401 11:45:09.719366 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [c43381f6ca3f] <==
W0401 11:44:52.101729 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0401 11:44:52.101805 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0401 11:44:52.101750 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0401 11:44:52.101820 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0401 11:44:52.101825 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0401 11:44:52.101930 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0401 11:44:52.101966 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0401 11:44:52.101978 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0401 11:44:52.102000 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0401 11:44:52.102039 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W0401 11:44:52.910739 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0401 11:44:52.910798 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0401 11:44:53.045113 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0401 11:44:53.045159 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W0401 11:44:53.051211 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0401 11:44:53.051304 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0401 11:44:53.063339 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0401 11:44:53.063405 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0401 11:44:53.094423 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0401 11:44:53.094468 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W0401 11:44:53.140373 1 reflector.go:539] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0401 11:44:53.140423 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W0401 11:44:53.211552 1 reflector.go:539] vendor/k8s.io/client-go/informers/factory.go:159: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0401 11:44:53.211599 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:159: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
I0401 11:44:55.799545 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Apr 01 11:45:07 skaffold-475000 kubelet[2627]: I0401 11:45:07.383788 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/731f8613-4c5d-447a-b8b2-e88114c0f069-tmp\") pod \"storage-provisioner\" (UID: \"731f8613-4c5d-447a-b8b2-e88114c0f069\") " pod="kube-system/storage-provisioner"
Apr 01 11:45:07 skaffold-475000 kubelet[2627]: I0401 11:45:07.383861 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtxfk\" (UniqueName: \"kubernetes.io/projected/731f8613-4c5d-447a-b8b2-e88114c0f069-kube-api-access-qtxfk\") pod \"storage-provisioner\" (UID: \"731f8613-4c5d-447a-b8b2-e88114c0f069\") " pod="kube-system/storage-provisioner"
Apr 01 11:45:07 skaffold-475000 kubelet[2627]: E0401 11:45:07.489781 2627 projected.go:294] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Apr 01 11:45:07 skaffold-475000 kubelet[2627]: E0401 11:45:07.489832 2627 projected.go:200] Error preparing data for projected volume kube-api-access-qtxfk for pod kube-system/storage-provisioner: configmap "kube-root-ca.crt" not found
Apr 01 11:45:07 skaffold-475000 kubelet[2627]: E0401 11:45:07.489925 2627 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/731f8613-4c5d-447a-b8b2-e88114c0f069-kube-api-access-qtxfk podName:731f8613-4c5d-447a-b8b2-e88114c0f069 nodeName:}" failed. No retries permitted until 2024-04-01 11:45:07.98990199 +0000 UTC m=+13.158423071 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qtxfk" (UniqueName: "kubernetes.io/projected/731f8613-4c5d-447a-b8b2-e88114c0f069-kube-api-access-qtxfk") pod "storage-provisioner" (UID: "731f8613-4c5d-447a-b8b2-e88114c0f069") : configmap "kube-root-ca.crt" not found
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.201155 2627 topology_manager.go:215] "Topology Admit Handler" podUID="014f0686-eb85-4098-bfbc-1f7eb029a7ba" podNamespace="kube-system" podName="kube-proxy-dtccv"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.293050 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/014f0686-eb85-4098-bfbc-1f7eb029a7ba-kube-proxy\") pod \"kube-proxy-dtccv\" (UID: \"014f0686-eb85-4098-bfbc-1f7eb029a7ba\") " pod="kube-system/kube-proxy-dtccv"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.328052 2627 topology_manager.go:215] "Topology Admit Handler" podUID="bb2e44ae-123f-4c56-b8da-0316fc7c838a" podNamespace="kube-system" podName="coredns-76f75df574-4dsjr"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.393529 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/014f0686-eb85-4098-bfbc-1f7eb029a7ba-lib-modules\") pod \"kube-proxy-dtccv\" (UID: \"014f0686-eb85-4098-bfbc-1f7eb029a7ba\") " pod="kube-system/kube-proxy-dtccv"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.393590 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/014f0686-eb85-4098-bfbc-1f7eb029a7ba-xtables-lock\") pod \"kube-proxy-dtccv\" (UID: \"014f0686-eb85-4098-bfbc-1f7eb029a7ba\") " pod="kube-system/kube-proxy-dtccv"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.393626 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sjcz\" (UniqueName: \"kubernetes.io/projected/014f0686-eb85-4098-bfbc-1f7eb029a7ba-kube-api-access-5sjcz\") pod \"kube-proxy-dtccv\" (UID: \"014f0686-eb85-4098-bfbc-1f7eb029a7ba\") " pod="kube-system/kube-proxy-dtccv"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.495714 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bb2e44ae-123f-4c56-b8da-0316fc7c838a-config-volume\") pod \"coredns-76f75df574-4dsjr\" (UID: \"bb2e44ae-123f-4c56-b8da-0316fc7c838a\") " pod="kube-system/coredns-76f75df574-4dsjr"
Apr 01 11:45:08 skaffold-475000 kubelet[2627]: I0401 11:45:08.495806 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm5hq\" (UniqueName: \"kubernetes.io/projected/bb2e44ae-123f-4c56-b8da-0316fc7c838a-kube-api-access-zm5hq\") pod \"coredns-76f75df574-4dsjr\" (UID: \"bb2e44ae-123f-4c56-b8da-0316fc7c838a\") " pod="kube-system/coredns-76f75df574-4dsjr"
Apr 01 11:45:09 skaffold-475000 kubelet[2627]: I0401 11:45:09.225267 2627 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5868404cc74836aa478e916804437f5c4835f44fb3c41df224a25598842f4a08"
Apr 01 11:45:09 skaffold-475000 kubelet[2627]: I0401 11:45:09.302834 2627 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.302774489 podStartE2EDuration="13.302774489s" podCreationTimestamp="2024-04-01 11:44:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2024-04-01 11:45:09.30205122 +0000 UTC m=+14.470572298" watchObservedRunningTime="2024-04-01 11:45:09.302774489 +0000 UTC m=+14.471295573"
Apr 01 11:45:10 skaffold-475000 kubelet[2627]: I0401 11:45:10.324427 2627 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-76f75df574-4dsjr" podStartSLOduration=2.324378239 podStartE2EDuration="2.324378239s" podCreationTimestamp="2024-04-01 11:45:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2024-04-01 11:45:10.314935771 +0000 UTC m=+15.483456855" watchObservedRunningTime="2024-04-01 11:45:10.324378239 +0000 UTC m=+15.492899317"
Apr 01 11:45:15 skaffold-475000 kubelet[2627]: I0401 11:45:15.286820 2627 kuberuntime_manager.go:1529] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Apr 01 11:45:15 skaffold-475000 kubelet[2627]: I0401 11:45:15.287547 2627 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Apr 01 11:45:39 skaffold-475000 kubelet[2627]: I0401 11:45:39.469166 2627 scope.go:117] "RemoveContainer" containerID="154c3a30f90ed3d092b293e62db5726fbda834016d788cb6495ac50bd30781c9"
Apr 01 11:45:39 skaffold-475000 kubelet[2627]: I0401 11:45:39.476935 2627 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-dtccv" podStartSLOduration=31.476907588 podStartE2EDuration="31.476907588s" podCreationTimestamp="2024-04-01 11:45:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2024-04-01 11:45:10.324598164 +0000 UTC m=+15.493119251" watchObservedRunningTime="2024-04-01 11:45:39.476907588 +0000 UTC m=+44.645421915"
Apr 01 11:49:27 skaffold-475000 kubelet[2627]: I0401 11:49:27.967100 2627 topology_manager.go:215] "Topology Admit Handler" podUID="b2d94415-db3a-441c-9176-675e6236de65" podNamespace="default" podName="leeroy-web-5dfc684c7-64qc7"
Apr 01 11:49:28 skaffold-475000 kubelet[2627]: I0401 11:49:28.000837 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv2nr\" (UniqueName: \"kubernetes.io/projected/b2d94415-db3a-441c-9176-675e6236de65-kube-api-access-jv2nr\") pod \"leeroy-web-5dfc684c7-64qc7\" (UID: \"b2d94415-db3a-441c-9176-675e6236de65\") " pod="default/leeroy-web-5dfc684c7-64qc7"
Apr 01 11:49:28 skaffold-475000 kubelet[2627]: I0401 11:49:28.094034 2627 topology_manager.go:215] "Topology Admit Handler" podUID="cc247ea0-483c-4f2d-aa88-0eff133725c7" podNamespace="default" podName="leeroy-app-8477656689-hcjh4"
Apr 01 11:49:28 skaffold-475000 kubelet[2627]: I0401 11:49:28.102698 2627 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfprz\" (UniqueName: \"kubernetes.io/projected/cc247ea0-483c-4f2d-aa88-0eff133725c7-kube-api-access-gfprz\") pod \"leeroy-app-8477656689-hcjh4\" (UID: \"cc247ea0-483c-4f2d-aa88-0eff133725c7\") " pod="default/leeroy-app-8477656689-hcjh4"
Apr 01 11:49:28 skaffold-475000 kubelet[2627]: I0401 11:49:28.724388 2627 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/leeroy-web-5dfc684c7-64qc7" podStartSLOduration=1.7243578990000001 podStartE2EDuration="1.724357899s" podCreationTimestamp="2024-04-01 11:49:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2024-04-01 11:49:28.716622127 +0000 UTC m=+273.883988235" watchObservedRunningTime="2024-04-01 11:49:28.724357899 +0000 UTC m=+273.891724007"
==> storage-provisioner [154c3a30f90e] <==
I0401 11:45:08.987481 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0401 11:45:38.996593 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
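This first storage-provisioner container hit the same startup race as CoreDNS: it could not reach 10.96.0.1:443 (dial i/o timeout) and exited fatally at 11:45:38, matching the dockerd "ignoring event" at 11:45:39 and the kubelet RemoveContainer entry above; its replacement (174faf47ff81, next section) succeeded once the VIP was reachable. To pull logs from an already-exited container under the docker driver, something like the following should work (a sketch using IDs taken from this log):

$ minikube -p skaffold-475000 ssh -- docker logs 154c3a30f90ed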
==> storage-provisioner [174faf47ff81] <==
I0401 11:45:39.553486 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0401 11:45:39.559927 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0401 11:45:39.559976 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0401 11:45:39.565291 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0401 11:45:39.565461 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"bccd6f9e-004a-4ff8-84e9-5ca141111b27", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' skaffold-475000_0b909a13-5527-4f89-8964-4a0ce6aa7933 became leader
I0401 11:45:39.565482 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_skaffold-475000_0b909a13-5527-4f89-8964-4a0ce6aa7933!
I0401 11:45:39.667810 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_skaffold-475000_0b909a13-5527-4f89-8964-4a0ce6aa7933!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-darwin-amd64 status --format={{.APIServer}} -p skaffold-475000 -n skaffold-475000
helpers_test.go:261: (dbg) Run: kubectl --context skaffold-475000 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestSkaffold FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
helpers_test.go:175: Cleaning up "skaffold-475000" profile ...
helpers_test.go:178: (dbg) Run: out/minikube-darwin-amd64 delete -p skaffold-475000
helpers_test.go:178: (dbg) Done: out/minikube-darwin-amd64 delete -p skaffold-475000: (3.085628435s)
--- FAIL: TestSkaffold (306.80s)
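Taken together, the post-mortem points at a timing race rather than a broken deployment: per the kubelet and container-status sections, the leeroy pods reached Running at about 11:49:28, only seconds before the snapshot at 11:49:33 and after the test's pod wait had already been exhausted. A hedged manual equivalent of that wait, with a looser deadline, would be:

$ kubectl --context skaffold-475000 wait --for=condition=Ready pod -l app=leeroy-app --timeout=2m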