=== RUN TestPause/serial/Start
pause_test.go:80: (dbg) Run: out/minikube-linux-amd64 start -p pause-966057 --memory=2048 --install-addons=false --wait=all --driver=docker --container-runtime=docker
pause_test.go:80: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p pause-966057 --memory=2048 --install-addons=false --wait=all --driver=docker --container-runtime=docker: signal: killed (15m0.010428954s)
-- stdout --
* [pause-966057] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
- MINIKUBE_LOCATION=20318
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/20318-732960/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/20318-732960/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the docker driver based on user configuration
* Using Docker driver with root privileges
* Starting "pause-966057" primary control-plane node in "pause-966057" cluster
* Pulling base image v0.0.46 ...
* Creating docker container (CPUs=2, Memory=2048MB) ...
* Preparing Kubernetes v1.32.1 on Docker 27.4.1 ...
- Generating certificates and keys ...
- Booting up control plane ...
- Configuring RBAC rules ...
* Configuring bridge CNI (Container Networking Interface) ...
* Verifying Kubernetes components...
-- /stdout --
** stderr **
E0127 11:52:05.486563 1025229 start.go:160] Unable to scale down deployment "coredns" in namespace "kube-system" to 1 replica: non-retryable failure while rescaling coredns deployment: Operation cannot be fulfilled on deployments.apps "coredns": the object has been modified; please apply your changes to the latest version and try again
** /stderr **
pause_test.go:82: failed to start minikube with args: "out/minikube-linux-amd64 start -p pause-966057 --memory=2048 --install-addons=false --wait=all --driver=docker --container-runtime=docker" : signal: killed
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestPause/serial/Start]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect pause-966057
helpers_test.go:235: (dbg) docker inspect pause-966057:
-- stdout --
[
{
"Id": "17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a",
"Created": "2025-01-27T11:51:33.383699934Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1027477,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-01-27T11:51:33.501498609Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:e72c4cbe9b296d8a58fbcae1a7b969fa1cee662cd7b86f2d4efc5e146519cf0a",
"ResolvConfPath": "/var/lib/docker/containers/17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a/hostname",
"HostsPath": "/var/lib/docker/containers/17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a/hosts",
"LogPath": "/var/lib/docker/containers/17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a/17887611f4a6872cdd63fa00f948be9e9de17404a7ef8204d23e9ad007c5483a-json.log",
"Name": "/pause-966057",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"pause-966057:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "pause-966057",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2147483648,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4294967296,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/1d46ab333601874ba191a12df3a9c470fc5d61bd1fd35c2142fb531cd2d0bee2-init/diff:/var/lib/docker/overlay2/83d959066ccbcbd8323f7218dae5bb5644fd84b3f11929cfb50bd6d4db7ecbff/diff",
"MergedDir": "/var/lib/docker/overlay2/1d46ab333601874ba191a12df3a9c470fc5d61bd1fd35c2142fb531cd2d0bee2/merged",
"UpperDir": "/var/lib/docker/overlay2/1d46ab333601874ba191a12df3a9c470fc5d61bd1fd35c2142fb531cd2d0bee2/diff",
"WorkDir": "/var/lib/docker/overlay2/1d46ab333601874ba191a12df3a9c470fc5d61bd1fd35c2142fb531cd2d0bee2/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "pause-966057",
"Source": "/var/lib/docker/volumes/pause-966057/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "pause-966057",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "pause-966057",
"name.minikube.sigs.k8s.io": "pause-966057",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "65bf3006c8f47da2732e52ef529b1641ceb1a15c05b0fdf12e460d04e9b45497",
"SandboxKey": "/var/run/docker/netns/65bf3006c8f4",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33416"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33417"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33420"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33418"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33419"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"pause-966057": {
"IPAMConfig": {
"IPv4Address": "192.168.94.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:5e:02",
"DriverOpts": null,
"NetworkID": "bbb7144340609221ccaf0e47bc153307c9b54fc2bec12f3454b842159f0dcf18",
"EndpointID": "a36b8b7f04f12bf64870f0e51f7b6455a6dfb93129d72db30a67182ac72209fc",
"Gateway": "192.168.94.1",
"IPAddress": "192.168.94.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"pause-966057",
"17887611f4a6"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p pause-966057 -n pause-966057
helpers_test.go:244: <<< TestPause/serial/Start FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestPause/serial/Start]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p pause-966057 logs -n 25
helpers_test.go:252: TestPause/serial/Start logs:
-- stdout --
==> Audit <==
|---------|---------------------------------|---------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------|---------------------------|---------|---------|---------------------|---------------------|
| delete | -p calico-981568 | calico-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| start | -p enable-default-cni-981568 | enable-default-cni-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | |
| | --memory=3072 | | | | | |
| | --alsologtostderr --wait=true | | | | | |
| | --wait-timeout=15m | | | | | |
| | --enable-default-cni=true | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | cat /etc/nsswitch.conf | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | cat /etc/hosts | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | cat /etc/resolv.conf | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | crictl pods | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | crictl ps --all | | | | | |
| ssh | -p false-981568 pgrep -a | false-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | kubelet | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | find /etc/cni -type f -exec sh | | | | | |
| | -c 'echo {}; cat {}' \; | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | ip a s | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | ip r s | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | iptables-save | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | iptables -t nat -L -n -v | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | cat /run/flannel/subnet.env | | | | | |
| ssh | -p custom-flannel-981568 | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | |
| | sudo cat | | | | | |
| | /etc/kube-flannel/cni-conf.json | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | systemctl status kubelet --all | | | | | |
| | --full --no-pager | | | | | |
| ssh | -p custom-flannel-981568 | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | sudo systemctl cat kubelet | | | | | |
| | --no-pager | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | journalctl -xeu kubelet --all | | | | | |
| | --full --no-pager | | | | | |
| ssh | -p custom-flannel-981568 | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | sudo cat | | | | | |
| | /etc/kubernetes/kubelet.conf | | | | | |
| ssh | -p custom-flannel-981568 | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | sudo cat | | | | | |
| | /var/lib/kubelet/config.yaml | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | systemctl status docker --all | | | | | |
| | --full --no-pager | | | | | |
| ssh | -p custom-flannel-981568 | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | sudo systemctl cat docker | | | | | |
| | --no-pager | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | cat /etc/docker/daemon.json | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | 27 Jan 25 12:06 UTC |
| | docker system info | | | | | |
| ssh | -p custom-flannel-981568 sudo | custom-flannel-981568 | jenkins | v1.35.0 | 27 Jan 25 12:06 UTC | |
| | systemctl status cri-docker | | | | | |
| | --all --full --no-pager | | | | | |
|---------|---------------------------------|---------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2025/01/27 12:06:19
Running on machine: ubuntu-20-agent-14
Binary: Built with gc go1.23.4 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0127 12:06:19.687557 1206863 out.go:345] Setting OutFile to fd 1 ...
I0127 12:06:19.687675 1206863 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0127 12:06:19.687684 1206863 out.go:358] Setting ErrFile to fd 2...
I0127 12:06:19.687689 1206863 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0127 12:06:19.687907 1206863 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20318-732960/.minikube/bin
I0127 12:06:19.688690 1206863 out.go:352] Setting JSON to false
I0127 12:06:19.690791 1206863 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-14","uptime":10127,"bootTime":1737969453,"procs":526,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1074-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0127 12:06:19.690932 1206863 start.go:139] virtualization: kvm guest
I0127 12:06:19.694150 1206863 out.go:177] * [enable-default-cni-981568] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
I0127 12:06:19.695635 1206863 out.go:177] - MINIKUBE_LOCATION=20318
I0127 12:06:19.695668 1206863 notify.go:220] Checking for updates...
I0127 12:06:19.698368 1206863 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0127 12:06:19.699610 1206863 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20318-732960/kubeconfig
I0127 12:06:19.701017 1206863 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20318-732960/.minikube
I0127 12:06:19.702240 1206863 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0127 12:06:19.703448 1206863 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0127 12:06:19.705285 1206863 config.go:182] Loaded profile config "custom-flannel-981568": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0127 12:06:19.705425 1206863 config.go:182] Loaded profile config "false-981568": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0127 12:06:19.705547 1206863 config.go:182] Loaded profile config "pause-966057": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0127 12:06:19.705645 1206863 driver.go:394] Setting default libvirt URI to qemu:///system
I0127 12:06:19.733167 1206863 docker.go:123] docker version: linux-27.5.1:Docker Engine - Community
I0127 12:06:19.733273 1206863 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0127 12:06:19.797629 1206863 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:49 OomKillDisable:true NGoroutines:74 SystemTime:2025-01-27 12:06:19.787734319 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1074-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647988736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:27.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.20.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.32.4] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0127 12:06:19.797738 1206863 docker.go:318] overlay module found
I0127 12:06:19.799782 1206863 out.go:177] * Using the docker driver based on user configuration
I0127 12:06:19.800945 1206863 start.go:297] selected driver: docker
I0127 12:06:19.800969 1206863 start.go:901] validating driver "docker" against <nil>
I0127 12:06:19.800984 1206863 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0127 12:06:19.802283 1206863 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0127 12:06:19.862971 1206863 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:49 OomKillDisable:true NGoroutines:74 SystemTime:2025-01-27 12:06:19.852520321 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1074-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647988736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:27.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb Expected:bcc810d6b9066471b0b6fa75f557a15a1cbf31bb} RuncCommit:{ID:v1.2.4-0-g6c52b3f Expected:v1.2.4-0-g6c52b3f} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.20.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.32.4] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0127 12:06:19.863234 1206863 start_flags.go:310] no existing cluster config was found, will generate one from the flags
E0127 12:06:19.863537 1206863 start_flags.go:464] Found deprecated --enable-default-cni flag, setting --cni=bridge
I0127 12:06:19.863577 1206863 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0127 12:06:19.865624 1206863 out.go:177] * Using Docker driver with root privileges
I0127 12:06:19.867196 1206863 cni.go:84] Creating CNI manager for "bridge"
I0127 12:06:19.867226 1206863 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0127 12:06:19.867321 1206863 start.go:340] cluster config:
{Name:enable-default-cni-981568 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:enable-default-cni-981568 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:bridge} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0127 12:06:19.868932 1206863 out.go:177] * Starting "enable-default-cni-981568" primary control-plane node in "enable-default-cni-981568" cluster
I0127 12:06:19.870280 1206863 cache.go:121] Beginning downloading kic base image for docker with docker
I0127 12:06:19.871745 1206863 out.go:177] * Pulling base image v0.0.46 ...
I0127 12:06:19.873007 1206863 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0127 12:06:19.873050 1206863 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20318-732960/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4
I0127 12:06:19.873061 1206863 cache.go:56] Caching tarball of preloaded images
I0127 12:06:19.873118 1206863 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 in local docker daemon
I0127 12:06:19.873158 1206863 preload.go:172] Found /home/jenkins/minikube-integration/20318-732960/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0127 12:06:19.873173 1206863 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on docker
I0127 12:06:19.873279 1206863 profile.go:143] Saving config to /home/jenkins/minikube-integration/20318-732960/.minikube/profiles/enable-default-cni-981568/config.json ...
I0127 12:06:19.873314 1206863 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20318-732960/.minikube/profiles/enable-default-cni-981568/config.json: {Name:mk12b75a285ec803c8baba5f569c665efd45517e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0127 12:06:19.896630 1206863 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 in local docker daemon, skipping pull
I0127 12:06:19.896662 1206863 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 exists in daemon, skipping load
I0127 12:06:19.896692 1206863 cache.go:227] Successfully downloaded all kic artifacts
I0127 12:06:19.896738 1206863 start.go:360] acquireMachinesLock for enable-default-cni-981568: {Name:mk7f5ed90897847cc70c8c69678e5f52f0e18a4c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0127 12:06:19.896859 1206863 start.go:364] duration metric: took 98.904µs to acquireMachinesLock for "enable-default-cni-981568"
I0127 12:06:19.896891 1206863 start.go:93] Provisioning new machine with config: &{Name:enable-default-cni-981568 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:enable-default-cni-981568 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:bridge} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0127 12:06:19.897006 1206863 start.go:125] createHost starting for "" (driver="docker")
I0127 12:06:19.564874 1194893 pod_ready.go:93] pod "coredns-668d6bf9bc-sz944" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:19.564903 1194893 pod_ready.go:82] duration metric: took 511.911347ms for pod "coredns-668d6bf9bc-sz944" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.564920 1194893 pod_ready.go:79] waiting up to 15m0s for pod "etcd-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.573319 1194893 pod_ready.go:93] pod "etcd-false-981568" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:19.573349 1194893 pod_ready.go:82] duration metric: took 8.420226ms for pod "etcd-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.573363 1194893 pod_ready.go:79] waiting up to 15m0s for pod "kube-apiserver-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.578870 1194893 pod_ready.go:93] pod "kube-apiserver-false-981568" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:19.578893 1194893 pod_ready.go:82] duration metric: took 5.522598ms for pod "kube-apiserver-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.578903 1194893 pod_ready.go:79] waiting up to 15m0s for pod "kube-controller-manager-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.582972 1194893 pod_ready.go:93] pod "kube-controller-manager-false-981568" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:19.582997 1194893 pod_ready.go:82] duration metric: took 4.086143ms for pod "kube-controller-manager-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.583013 1194893 pod_ready.go:79] waiting up to 15m0s for pod "kube-proxy-6mzf4" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.850809 1194893 pod_ready.go:93] pod "kube-proxy-6mzf4" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:19.850846 1194893 pod_ready.go:82] duration metric: took 267.824698ms for pod "kube-proxy-6mzf4" in "kube-system" namespace to be "Ready" ...
I0127 12:06:19.850863 1194893 pod_ready.go:79] waiting up to 15m0s for pod "kube-scheduler-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:20.250805 1194893 pod_ready.go:93] pod "kube-scheduler-false-981568" in "kube-system" namespace has status "Ready":"True"
I0127 12:06:20.250834 1194893 pod_ready.go:82] duration metric: took 399.960443ms for pod "kube-scheduler-false-981568" in "kube-system" namespace to be "Ready" ...
I0127 12:06:20.250850 1194893 pod_ready.go:39] duration metric: took 7.261131829s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0127 12:06:20.250874 1194893 api_server.go:52] waiting for apiserver process to appear ...
I0127 12:06:20.250932 1194893 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0127 12:06:20.265275 1194893 api_server.go:72] duration metric: took 8.884850497s to wait for apiserver process to appear ...
I0127 12:06:20.265307 1194893 api_server.go:88] waiting for apiserver healthz status ...
I0127 12:06:20.265331 1194893 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I0127 12:06:20.270956 1194893 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I0127 12:06:20.272291 1194893 api_server.go:141] control plane version: v1.32.1
I0127 12:06:20.272322 1194893 api_server.go:131] duration metric: took 7.007062ms to wait for apiserver health ...
I0127 12:06:20.272332 1194893 system_pods.go:43] waiting for kube-system pods to appear ...
I0127 12:06:20.453812 1194893 system_pods.go:59] 7 kube-system pods found
I0127 12:06:20.453852 1194893 system_pods.go:61] "coredns-668d6bf9bc-sz944" [bac339cd-896c-447e-97d4-f2718a7d73c4] Running
I0127 12:06:20.453860 1194893 system_pods.go:61] "etcd-false-981568" [95bc7b49-2f05-49d6-9f52-4ea62a529b33] Running
I0127 12:06:20.453865 1194893 system_pods.go:61] "kube-apiserver-false-981568" [50b35eb5-8cf5-45ec-adc8-1c82729e5ad6] Running
I0127 12:06:20.453871 1194893 system_pods.go:61] "kube-controller-manager-false-981568" [aa369ef9-0003-4fbf-93f5-1a96e9dacdfd] Running
I0127 12:06:20.453875 1194893 system_pods.go:61] "kube-proxy-6mzf4" [bc0f0ebf-2bc4-4027-9166-9eba1ba8fd9b] Running
I0127 12:06:20.453882 1194893 system_pods.go:61] "kube-scheduler-false-981568" [920c8a0d-0dc5-49cd-9287-2d0a9670a7d9] Running
I0127 12:06:20.453893 1194893 system_pods.go:61] "storage-provisioner" [2ff7ccc1-397d-469f-8281-476fb0f97e81] Running
I0127 12:06:20.453903 1194893 system_pods.go:74] duration metric: took 181.56231ms to wait for pod list to return data ...
I0127 12:06:20.453917 1194893 default_sa.go:34] waiting for default service account to be created ...
I0127 12:06:20.650998 1194893 default_sa.go:45] found service account: "default"
I0127 12:06:20.651030 1194893 default_sa.go:55] duration metric: took 197.102896ms for default service account to be created ...
I0127 12:06:20.651043 1194893 system_pods.go:137] waiting for k8s-apps to be running ...
I0127 12:06:20.853180 1194893 system_pods.go:87] 7 kube-system pods found
I0127 12:06:21.051392 1194893 system_pods.go:105] "coredns-668d6bf9bc-sz944" [bac339cd-896c-447e-97d4-f2718a7d73c4] Running
I0127 12:06:21.051424 1194893 system_pods.go:105] "etcd-false-981568" [95bc7b49-2f05-49d6-9f52-4ea62a529b33] Running
I0127 12:06:21.051432 1194893 system_pods.go:105] "kube-apiserver-false-981568" [50b35eb5-8cf5-45ec-adc8-1c82729e5ad6] Running
I0127 12:06:21.051442 1194893 system_pods.go:105] "kube-controller-manager-false-981568" [aa369ef9-0003-4fbf-93f5-1a96e9dacdfd] Running
I0127 12:06:21.051449 1194893 system_pods.go:105] "kube-proxy-6mzf4" [bc0f0ebf-2bc4-4027-9166-9eba1ba8fd9b] Running
I0127 12:06:21.051457 1194893 system_pods.go:105] "kube-scheduler-false-981568" [920c8a0d-0dc5-49cd-9287-2d0a9670a7d9] Running
I0127 12:06:21.051468 1194893 system_pods.go:105] "storage-provisioner" [2ff7ccc1-397d-469f-8281-476fb0f97e81] Running
I0127 12:06:21.051478 1194893 system_pods.go:147] duration metric: took 400.426537ms to wait for k8s-apps to be running ...
I0127 12:06:21.051490 1194893 system_svc.go:44] waiting for kubelet service to be running ....
I0127 12:06:21.051547 1194893 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0127 12:06:21.066093 1194893 system_svc.go:56] duration metric: took 14.591135ms WaitForService to wait for kubelet
I0127 12:06:21.066134 1194893 kubeadm.go:582] duration metric: took 9.685714015s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0127 12:06:21.066158 1194893 node_conditions.go:102] verifying NodePressure condition ...
I0127 12:06:21.250947 1194893 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0127 12:06:21.250976 1194893 node_conditions.go:123] node cpu capacity is 8
I0127 12:06:21.250992 1194893 node_conditions.go:105] duration metric: took 184.82811ms to run NodePressure ...
I0127 12:06:21.251007 1194893 start.go:241] waiting for startup goroutines ...
I0127 12:06:21.251016 1194893 start.go:246] waiting for cluster config update ...
I0127 12:06:21.251031 1194893 start.go:255] writing updated cluster config ...
I0127 12:06:21.251352 1194893 ssh_runner.go:195] Run: rm -f paused
I0127 12:06:21.310359 1194893 start.go:600] kubectl: 1.32.1, cluster: 1.32.1 (minor skew: 0)
I0127 12:06:21.312622 1194893 out.go:177] * Done! kubectl is now configured to use "false-981568" cluster and "default" namespace by default
I0127 12:06:19.898994 1206863 out.go:235] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0127 12:06:19.899295 1206863 start.go:159] libmachine.API.Create for "enable-default-cni-981568" (driver="docker")
I0127 12:06:19.899333 1206863 client.go:168] LocalClient.Create starting
I0127 12:06:19.899421 1206863 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20318-732960/.minikube/certs/ca.pem
I0127 12:06:19.899470 1206863 main.go:141] libmachine: Decoding PEM data...
I0127 12:06:19.899494 1206863 main.go:141] libmachine: Parsing certificate...
I0127 12:06:19.899568 1206863 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/20318-732960/.minikube/certs/cert.pem
I0127 12:06:19.899597 1206863 main.go:141] libmachine: Decoding PEM data...
I0127 12:06:19.899610 1206863 main.go:141] libmachine: Parsing certificate...
I0127 12:06:19.900004 1206863 cli_runner.go:164] Run: docker network inspect enable-default-cni-981568 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0127 12:06:19.918403 1206863 cli_runner.go:211] docker network inspect enable-default-cni-981568 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0127 12:06:19.918476 1206863 network_create.go:284] running [docker network inspect enable-default-cni-981568] to gather additional debugging logs...
I0127 12:06:19.918496 1206863 cli_runner.go:164] Run: docker network inspect enable-default-cni-981568
W0127 12:06:19.936355 1206863 cli_runner.go:211] docker network inspect enable-default-cni-981568 returned with exit code 1
I0127 12:06:19.936384 1206863 network_create.go:287] error running [docker network inspect enable-default-cni-981568]: docker network inspect enable-default-cni-981568: exit status 1
stdout:
[]
stderr:
Error response from daemon: network enable-default-cni-981568 not found
I0127 12:06:19.936397 1206863 network_create.go:289] output of [docker network inspect enable-default-cni-981568]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network enable-default-cni-981568 not found
** /stderr **
I0127 12:06:19.936521 1206863 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0127 12:06:19.956518 1206863 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-fd0ebbfc036c IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:4b:59:04:a5} reservation:<nil>}
I0127 12:06:19.957347 1206863 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-c8e661517e01 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:02:42:5d:9d:77:44} reservation:<nil>}
I0127 12:06:19.958267 1206863 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-dbf36381b130 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:42:36:5f:6d:da} reservation:<nil>}
I0127 12:06:19.959352 1206863 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001d75c20}
I0127 12:06:19.959378 1206863 network_create.go:124] attempt to create docker network enable-default-cni-981568 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0127 12:06:19.959428 1206863 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=enable-default-cni-981568 enable-default-cni-981568
I0127 12:06:20.038428 1206863 network_create.go:108] docker network enable-default-cni-981568 192.168.76.0/24 created
I0127 12:06:20.038463 1206863 kic.go:121] calculated static IP "192.168.76.2" for the "enable-default-cni-981568" container
I0127 12:06:20.038522 1206863 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0127 12:06:20.059618 1206863 cli_runner.go:164] Run: docker volume create enable-default-cni-981568 --label name.minikube.sigs.k8s.io=enable-default-cni-981568 --label created_by.minikube.sigs.k8s.io=true
I0127 12:06:20.080065 1206863 oci.go:103] Successfully created a docker volume enable-default-cni-981568
I0127 12:06:20.080160 1206863 cli_runner.go:164] Run: docker run --rm --name enable-default-cni-981568-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=enable-default-cni-981568 --entrypoint /usr/bin/test -v enable-default-cni-981568:/var gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -d /var/lib
I0127 12:06:20.644379 1206863 oci.go:107] Successfully prepared a docker volume enable-default-cni-981568
I0127 12:06:20.644425 1206863 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0127 12:06:20.644451 1206863 kic.go:194] Starting extracting preloaded images to volume ...
I0127 12:06:20.644519 1206863 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20318-732960/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v enable-default-cni-981568:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -I lz4 -xf /preloaded.tar -C /extractDir
I0127 12:06:24.149148 1206863 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20318-732960/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v enable-default-cni-981568:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279 -I lz4 -xf /preloaded.tar -C /extractDir: (3.504569178s)
I0127 12:06:24.149183 1206863 kic.go:203] duration metric: took 3.504728089s to extract preloaded images to volume ...
W0127 12:06:24.149319 1206863 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0127 12:06:24.149440 1206863 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0127 12:06:24.209244 1206863 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname enable-default-cni-981568 --name enable-default-cni-981568 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=enable-default-cni-981568 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=enable-default-cni-981568 --network enable-default-cni-981568 --ip 192.168.76.2 --volume enable-default-cni-981568:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279
I0127 12:06:24.571865 1206863 cli_runner.go:164] Run: docker container inspect enable-default-cni-981568 --format={{.State.Running}}
I0127 12:06:24.597405 1206863 cli_runner.go:164] Run: docker container inspect enable-default-cni-981568 --format={{.State.Status}}
I0127 12:06:24.621030 1206863 cli_runner.go:164] Run: docker exec enable-default-cni-981568 stat /var/lib/dpkg/alternatives/iptables
I0127 12:06:24.680609 1206863 oci.go:144] the created container "enable-default-cni-981568" has a running status.
I0127 12:06:24.680779 1206863 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20318-732960/.minikube/machines/enable-default-cni-981568/id_rsa...
==> Docker <==
Jan 27 11:51:40 pause-966057 dockerd[1372]: time="2025-01-27T11:51:40.364719390Z" level=info msg="Docker daemon" commit=c710b88 containerd-snapshotter=false storage-driver=overlay2 version=27.4.1
Jan 27 11:51:40 pause-966057 dockerd[1372]: time="2025-01-27T11:51:40.364809007Z" level=info msg="Daemon has completed initialization"
Jan 27 11:51:40 pause-966057 dockerd[1372]: time="2025-01-27T11:51:40.389196656Z" level=info msg="API listen on [::]:2376"
Jan 27 11:51:40 pause-966057 dockerd[1372]: time="2025-01-27T11:51:40.389222462Z" level=info msg="API listen on /var/run/docker.sock"
Jan 27 11:51:40 pause-966057 systemd[1]: Started Docker Application Container Engine.
Jan 27 11:51:41 pause-966057 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Start docker client with request timeout 0s"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Hairpin mode is set to hairpin-veth"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Loaded network plugin cni"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Docker cri networking managed by network plugin cni"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Setting cgroupDriver cgroupfs"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Jan 27 11:51:41 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:41Z" level=info msg="Start cri-dockerd grpc backend"
Jan 27 11:51:41 pause-966057 systemd[1]: Started CRI Interface for Docker Application Container Engine.
Jan 27 11:51:54 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9bc9b6b75bcfa8c52969f6fb5358ff2abb87b240177315ce2f23372ec7479d39/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Jan 27 11:51:54 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ddd57952f05e420210945acf2ca8a77ef223a6ff4a830be65f922cf519c7c62f/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options trust-ad ndots:0 edns0]"
Jan 27 11:51:54 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/706997de5107d17f7a64729da9ed0e9162c7405d1bfd7f99f8c766888099c1ed/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Jan 27 11:51:54 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:51:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/7afbab0d612e433869316e5eaa6d64adab81a2d758734537057a5940a3e2314f/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:0 edns0 trust-ad]"
Jan 27 11:52:06 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:52:06Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/7c401b8594cf85c9a980ed3293dd46e75bef3d370e32a5c614278b9bcab68f8b/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Jan 27 11:52:06 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:52:06Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/919d8a42975ad71427d1a13036ef8c2468ebf15e3a79a100d7923249f4d2f887/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Jan 27 11:52:06 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:52:06Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/2f0b236a142fda0e1998324b72705f531cd2df06d90459015d77dc163f1d92da/resolv.conf as [nameserver 192.168.94.1 search us-east4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Jan 27 11:52:11 pause-966057 cri-dockerd[1646]: time="2025-01-27T11:52:11Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
f4f59eb4fcdef c69fa2e9cbf5f 14 minutes ago Running coredns 0 919d8a42975ad coredns-668d6bf9bc-jqmxl
3afa59f0ab5ab e29f9c7391fd9 14 minutes ago Running kube-proxy 0 2f0b236a142fd kube-proxy-8gdm9
16a085131f106 c69fa2e9cbf5f 14 minutes ago Running coredns 0 7c401b8594cf8 coredns-668d6bf9bc-9qph4
822ee588e0772 95c0bda56fc4d 14 minutes ago Running kube-apiserver 0 7afbab0d612e4 kube-apiserver-pause-966057
603c6e45fc787 019ee182b58e2 14 minutes ago Running kube-controller-manager 0 706997de5107d kube-controller-manager-pause-966057
5874f9a6bf9a5 2b0d6572d062c 14 minutes ago Running kube-scheduler 0 ddd57952f05e4 kube-scheduler-pause-966057
59ca7d1c4a3bc a9e7e6b294baf 14 minutes ago Running etcd 0 9bc9b6b75bcfa etcd-pause-966057
==> coredns [16a085131f10] <==
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = c7556d8fdf49c5e32a9077be8cfb9fc6947bb07e663a10d55b192eb63ad1f2bd9793e8e5f5a36fc9abb1957831eec5c997fd9821790e3990ae9531bf41ecea37
CoreDNS-1.11.3
linux/amd64, go1.21.11, a6338e9
[INFO] 127.0.0.1:37614 - 56057 "HINFO IN 4119459429824991052.6882402577518085140. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.010365492s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1034005533]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.672) (total time: 30000ms):
Trace[1034005533]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.673)
Trace[1034005533]: [30.000704156s] [30.000704156s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1122865068]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.672) (total time: 30000ms):
Trace[1122865068]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.673)
Trace[1122865068]: [30.000759344s] [30.000759344s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1613898332]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.672) (total time: 30000ms):
Trace[1613898332]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.673)
Trace[1613898332]: [30.000665775s] [30.000665775s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
==> coredns [f4f59eb4fcde] <==
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = c7556d8fdf49c5e32a9077be8cfb9fc6947bb07e663a10d55b192eb63ad1f2bd9793e8e5f5a36fc9abb1957831eec5c997fd9821790e3990ae9531bf41ecea37
CoreDNS-1.11.3
linux/amd64, go1.21.11, a6338e9
[INFO] 127.0.0.1:48047 - 43984 "HINFO IN 4672111443383488671.4843245864462572254. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.013976804s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[224861599]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.747) (total time: 30000ms):
Trace[224861599]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.747)
Trace[224861599]: [30.000723474s] [30.000723474s] END
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[764812613]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.747) (total time: 30000ms):
Trace[764812613]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.747)
Trace[764812613]: [30.000969174s] [30.000969174s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1687304521]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (27-Jan-2025 11:52:06.747) (total time: 30000ms):
Trace[1687304521]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:52:36.747)
Trace[1687304521]: [30.000966014s] [30.000966014s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
==> describe nodes <==
Name: pause-966057
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=pause-966057
kubernetes.io/os=linux
minikube.k8s.io/commit=21d19df81a8d69cdaec1a8f1932c09dc00369650
minikube.k8s.io/name=pause-966057
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_01_27T11_52_01_0700
minikube.k8s.io/version=v1.35.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 27 Jan 2025 11:51:59 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: pause-966057
AcquireTime: <unset>
RenewTime: Mon, 27 Jan 2025 12:06:27 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Mon, 27 Jan 2025 12:02:32 +0000   Mon, 27 Jan 2025 11:51:59 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Mon, 27 Jan 2025 12:02:32 +0000   Mon, 27 Jan 2025 11:51:59 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Mon, 27 Jan 2025 12:02:32 +0000   Mon, 27 Jan 2025 11:51:59 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Mon, 27 Jan 2025 12:02:32 +0000   Mon, 27 Jan 2025 11:52:01 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.94.2
Hostname: pause-966057
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859364Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859364Ki
pods: 110
System Info:
Machine ID: 403c9f482f8a42cb920f2174a1fc5108
System UUID: 8930be97-03c1-4e51-8e64-a50b4cee0ce7
Boot ID: 8be9b8aa-e32f-4f6e-8bc2-f041e27fdb85
Kernel Version: 5.15.0-1074-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.4.1
Kubelet Version: v1.32.1
Kube-Proxy Version: v1.32.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
  Namespace    Name                                   CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                   ------------  ----------  ---------------  -------------  ---
  kube-system  coredns-668d6bf9bc-9qph4               100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     14m
  kube-system  coredns-668d6bf9bc-jqmxl               100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     14m
  kube-system  etcd-pause-966057                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         14m
  kube-system  kube-apiserver-pause-966057            250m (3%)     0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-controller-manager-pause-966057   200m (2%)     0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-proxy-8gdm9                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-scheduler-pause-966057            100m (1%)     0 (0%)      0 (0%)           0 (0%)         14m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  0 (0%)
  memory             240Mi (0%)  340Mi (1%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type     Reason                   Age                From             Message
  ----     ------                   ----               ----             -------
  Normal   Starting                 14m                kube-proxy
  Normal   NodeAllocatableEnforced  14m                kubelet          Updated Node Allocatable limit across pods
  Warning  CgroupV1                 14m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
  Normal   NodeHasSufficientMemory  14m (x8 over 14m)  kubelet          Node pause-966057 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    14m (x8 over 14m)  kubelet          Node pause-966057 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     14m (x7 over 14m)  kubelet          Node pause-966057 status is now: NodeHasSufficientPID
  Normal   Starting                 14m                kubelet          Starting kubelet.
  Warning  CgroupV1                 14m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
  Normal   Starting                 14m                kubelet          Starting kubelet.
  Normal   NodeAllocatableEnforced  14m                kubelet          Updated Node Allocatable limit across pods
  Normal   NodeReady                14m                kubelet          Node pause-966057 status is now: NodeReady
  Normal   NodeHasSufficientMemory  14m                kubelet          Node pause-966057 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    14m                kubelet          Node pause-966057 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     14m                kubelet          Node pause-966057 status is now: NodeHasSufficientPID
  Normal   RegisteredNode           14m                node-controller  Node pause-966057 event: Registered Node pause-966057 in Controller
==> dmesg <==
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 12 4d 77 91 ba 46 08 06
[Jan27 12:03] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff de 2b bd 65 2f 3f 08 06
[ +0.054244] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 76 c7 a4 62 ce b2 08 06
[ +25.038924] IPv4: martian source 10.244.0.4 from 10.244.0.4, on dev bridge
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 46 b2 65 f0 f7 25 08 06
[ +1.453729] IPv4: martian source 10.244.0.1 from 10.244.0.5, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 72 98 6d 24 5b dc 08 06
[Jan27 12:04] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 6e bd 5f 1e fb cc 08 06
[ +16.493700] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 42 87 7d 02 d9 f0 08 06
[ +0.000430] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 6e bd 5f 1e fb cc 08 06
[Jan27 12:06] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 52 2d 8b b9 59 da 08 06
[ +11.985105] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 3a 67 71 ad cf ca 08 06
[ +0.033985] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 76 af 28 ec d4 50 08 06
[ +0.695954] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff de 75 c8 8d 29 05 08 06
[ +0.000377] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 52 2d 8b b9 59 da 08 06
==> etcd [59ca7d1c4a3b] <==
{"level":"info","ts":"2025-01-27T11:51:55.592658Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-01-27T11:51:55.594056Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-01-27T11:51:55.595583Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-01-27T11:51:55.636287Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-01-27T11:51:55.636476Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-01-27T11:51:55.592519Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-01-27T11:51:55.639273Z","caller":"etcdserver/server.go:2651","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:51:55.641339Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-01-27T11:51:55.648137Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"info","ts":"2025-01-27T11:51:55.660666Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:51:55.660855Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:51:55.660896Z","caller":"etcdserver/server.go:2675","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-01-27T11:53:10.088437Z","caller":"traceutil/trace.go:171","msg":"trace[1056898632] transaction","detail":"{read_only:false; response_revision:432; number_of_response:1; }","duration":"126.607409ms","start":"2025-01-27T11:53:09.961803Z","end":"2025-01-27T11:53:10.088410Z","steps":["trace[1056898632] 'process raft request' (duration: 63.62801ms)","trace[1056898632] 'compare' (duration: 62.882736ms)"],"step_count":2}
{"level":"info","ts":"2025-01-27T11:53:10.278085Z","caller":"traceutil/trace.go:171","msg":"trace[1587755845] transaction","detail":"{read_only:false; response_revision:433; number_of_response:1; }","duration":"123.270644ms","start":"2025-01-27T11:53:10.154787Z","end":"2025-01-27T11:53:10.278058Z","steps":["trace[1587755845] 'process raft request' (duration: 65.41436ms)","trace[1587755845] 'compare' (duration: 57.75186ms)"],"step_count":2}
{"level":"warn","ts":"2025-01-27T11:53:12.199359Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"101.926021ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-01-27T11:53:12.199489Z","caller":"traceutil/trace.go:171","msg":"trace[625121325] range","detail":"{range_begin:/registry/pods; range_end:; response_count:0; response_revision:434; }","duration":"102.102463ms","start":"2025-01-27T11:53:12.097367Z","end":"2025-01-27T11:53:12.199470Z","steps":["trace[625121325] 'range keys from in-memory index tree' (duration: 101.848371ms)"],"step_count":1}
{"level":"info","ts":"2025-01-27T11:54:30.143261Z","caller":"traceutil/trace.go:171","msg":"trace[1469465215] transaction","detail":"{read_only:false; response_revision:456; number_of_response:1; }","duration":"166.771264ms","start":"2025-01-27T11:54:29.976450Z","end":"2025-01-27T11:54:30.143221Z","steps":["trace[1469465215] 'process raft request' (duration: 121.96959ms)","trace[1469465215] 'compare' (duration: 44.646746ms)"],"step_count":2}
{"level":"info","ts":"2025-01-27T11:54:40.078079Z","caller":"traceutil/trace.go:171","msg":"trace[1345065943] transaction","detail":"{read_only:false; response_revision:459; number_of_response:1; }","duration":"119.962534ms","start":"2025-01-27T11:54:39.958093Z","end":"2025-01-27T11:54:40.078055Z","steps":["trace[1345065943] 'process raft request' (duration: 58.37795ms)","trace[1345065943] 'compare' (duration: 61.489252ms)"],"step_count":2}
{"level":"warn","ts":"2025-01-27T11:57:55.592746Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"119.666819ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 serializable:true keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-01-27T11:57:55.592837Z","caller":"traceutil/trace.go:171","msg":"trace[1255951600] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:519; }","duration":"119.777577ms","start":"2025-01-27T11:57:55.473042Z","end":"2025-01-27T11:57:55.592820Z","steps":["trace[1255951600] 'range keys from in-memory index tree' (duration: 119.639374ms)"],"step_count":1}
{"level":"info","ts":"2025-01-27T12:01:56.243373Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":501}
{"level":"info","ts":"2025-01-27T12:01:56.247363Z","caller":"mvcc/kvstore_compaction.go:72","msg":"finished scheduled compaction","compact-revision":501,"took":"3.714214ms","hash":1048855672,"current-db-size-bytes":1089536,"current-db-size":"1.1 MB","current-db-size-in-use-bytes":1089536,"current-db-size-in-use":"1.1 MB"}
{"level":"info","ts":"2025-01-27T12:01:56.247420Z","caller":"mvcc/hash.go:151","msg":"storing new hash","hash":1048855672,"revision":501,"compact-revision":-1}
{"level":"warn","ts":"2025-01-27T12:05:20.139428Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"122.733579ms","expected-duration":"100ms","prefix":"","request":"header:<ID:6571759728838042135 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/masterleases/192.168.94.2\" mod_revision:650 > success:<request_put:<key:\"/registry/masterleases/192.168.94.2\" value_size:65 lease:6571759728838042133 >> failure:<request_range:<key:\"/registry/masterleases/192.168.94.2\" > >>","response":"size:16"}
{"level":"info","ts":"2025-01-27T12:05:20.139826Z","caller":"traceutil/trace.go:171","msg":"trace[917190372] transaction","detail":"{read_only:false; response_revision:653; number_of_response:1; }","duration":"151.242572ms","start":"2025-01-27T12:05:19.988553Z","end":"2025-01-27T12:05:20.139795Z","steps":["trace[917190372] 'process raft request' (duration: 27.475626ms)","trace[917190372] 'compare' (duration: 122.615056ms)"],"step_count":2}
==> kernel <==
12:06:29 up 2:48, 0 users, load average: 4.60, 4.05, 6.00
Linux pause-966057 5.15.0-1074-gcp #83~20.04.1-Ubuntu SMP Wed Dec 18 20:42:35 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [822ee588e077] <==
I0127 11:51:58.230533 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0127 11:51:58.230624 1 cache.go:39] Caches are synced for autoregister controller
I0127 11:51:58.228483 1 shared_informer.go:320] Caches are synced for cluster_authentication_trust_controller
I0127 11:51:58.228499 1 cache.go:39] Caches are synced for LocalAvailability controller
I0127 11:51:58.253952 1 shared_informer.go:320] Caches are synced for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I0127 11:51:58.254255 1 policy_source.go:240] refreshing policies
E0127 11:51:58.266799 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
E0127 11:51:58.329793 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError"
I0127 11:51:58.330120 1 controller.go:615] quota admission added evaluator for: namespaces
I0127 11:51:58.429554 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I0127 11:51:59.116783 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0127 11:51:59.120983 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0127 11:51:59.121009 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0127 11:51:59.784897 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0127 11:51:59.831772 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0127 11:51:59.924483 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0127 11:51:59.932822 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I0127 11:51:59.934402 1 controller.go:615] quota admission added evaluator for: endpoints
I0127 11:51:59.944197 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0127 11:52:00.164385 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0127 11:52:01.069933 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0127 11:52:01.085416 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0127 11:52:01.100618 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
I0127 11:52:05.314559 1 controller.go:615] quota admission added evaluator for: controllerrevisions.apps
I0127 11:52:05.446243 1 controller.go:615] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [603c6e45fc78] <==
I0127 11:52:04.720534 1 shared_informer.go:320] Caches are synced for resource quota
I0127 11:52:04.720602 1 shared_informer.go:320] Caches are synced for namespace
I0127 11:52:04.720761 1 shared_informer.go:320] Caches are synced for resource quota
I0127 11:52:04.721004 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator
I0127 11:52:04.723278 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner
I0127 11:52:04.730914 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0127 11:52:04.749242 1 shared_informer.go:320] Caches are synced for disruption
I0127 11:52:04.754662 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 11:52:04.760784 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 11:52:04.760816 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0127 11:52:04.760826 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I0127 11:52:05.436880 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="pause-966057"
I0127 11:52:05.779600 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="328.860598ms"
I0127 11:52:05.794160 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="14.495586ms"
I0127 11:52:05.794270 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="66.14µs"
I0127 11:52:05.809094 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="89.3µs"
I0127 11:52:07.443133 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="142.253µs"
I0127 11:52:07.454300 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="82.858µs"
I0127 11:52:11.699733 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="pause-966057"
I0127 11:52:41.228669 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="9.308515ms"
I0127 11:52:41.228817 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="101.76µs"
I0127 11:52:45.958051 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="8.705924ms"
I0127 11:52:45.958164 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="72.422µs"
I0127 11:57:26.612862 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="pause-966057"
I0127 12:02:32.788874 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="pause-966057"
==> kube-proxy [3afa59f0ab5a] <==
I0127 11:52:06.766500 1 server_linux.go:66] "Using iptables proxy"
I0127 11:52:06.859970 1 server.go:698] "Successfully retrieved node IP(s)" IPs=["192.168.94.2"]
E0127 11:52:06.860058 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0127 11:52:06.881439 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0127 11:52:06.881504 1 server_linux.go:170] "Using iptables Proxier"
I0127 11:52:06.883446 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0127 11:52:06.883820 1 server.go:497] "Version info" version="v1.32.1"
I0127 11:52:06.883863 1 server.go:499] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0127 11:52:06.885532 1 config.go:199] "Starting service config controller"
I0127 11:52:06.885806 1 config.go:329] "Starting node config controller"
I0127 11:52:06.885970 1 shared_informer.go:313] Waiting for caches to sync for service config
I0127 11:52:06.886017 1 shared_informer.go:313] Waiting for caches to sync for node config
I0127 11:52:06.885360 1 config.go:105] "Starting endpoint slice config controller"
I0127 11:52:06.886383 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0127 11:52:06.987138 1 shared_informer.go:320] Caches are synced for node config
I0127 11:52:06.987160 1 shared_informer.go:320] Caches are synced for service config
I0127 11:52:06.987183 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [5874f9a6bf9a] <==
W0127 11:51:58.219589 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0127 11:51:58.225563 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:51:58.223133 1 reflector.go:569] runtime/asm_amd64.s:1700: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0127 11:51:58.225833 1 reflector.go:166] "Unhandled Error" err="runtime/asm_amd64.s:1700: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0127 11:51:59.094577 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "volumeattachments" in API group "storage.k8s.io" at the cluster scope
E0127 11:51:59.094630 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.VolumeAttachment: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.125637 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0127 11:51:59.125686 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.272954 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0127 11:51:59.273118 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.311148 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0127 11:51:59.311434 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.368822 1 reflector.go:569] runtime/asm_amd64.s:1700: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0127 11:51:59.369126 1 reflector.go:166] "Unhandled Error" err="runtime/asm_amd64.s:1700: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0127 11:51:59.404053 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0127 11:51:59.404134 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.414811 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0127 11:51:59.414868 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.515476 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0127 11:51:59.515569 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.530561 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0127 11:51:59.530611 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0127 11:51:59.557481 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0127 11:51:59.557861 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I0127 11:52:01.717911 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Jan 27 11:52:02 pause-966057 kubelet[2527]: I0127 11:52:02.231931 2527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-pause-966057" podStartSLOduration=1.231911895 podStartE2EDuration="1.231911895s" podCreationTimestamp="2025-01-27 11:52:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:52:02.231826044 +0000 UTC m=+1.341462458" watchObservedRunningTime="2025-01-27 11:52:02.231911895 +0000 UTC m=+1.341548306"
Jan 27 11:52:02 pause-966057 kubelet[2527]: I0127 11:52:02.249967 2527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-pause-966057" podStartSLOduration=1.249944593 podStartE2EDuration="1.249944593s" podCreationTimestamp="2025-01-27 11:52:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:52:02.249631008 +0000 UTC m=+1.359267418" watchObservedRunningTime="2025-01-27 11:52:02.249944593 +0000 UTC m=+1.359581005"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.361410 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/012c024c-237a-4d01-b79c-68b073836a81-kube-proxy\") pod \"kube-proxy-8gdm9\" (UID: \"012c024c-237a-4d01-b79c-68b073836a81\") " pod="kube-system/kube-proxy-8gdm9"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.362132 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/012c024c-237a-4d01-b79c-68b073836a81-lib-modules\") pod \"kube-proxy-8gdm9\" (UID: \"012c024c-237a-4d01-b79c-68b073836a81\") " pod="kube-system/kube-proxy-8gdm9"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.362286 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x5dg\" (UniqueName: \"kubernetes.io/projected/012c024c-237a-4d01-b79c-68b073836a81-kube-api-access-6x5dg\") pod \"kube-proxy-8gdm9\" (UID: \"012c024c-237a-4d01-b79c-68b073836a81\") " pod="kube-system/kube-proxy-8gdm9"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.362425 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/012c024c-237a-4d01-b79c-68b073836a81-xtables-lock\") pod \"kube-proxy-8gdm9\" (UID: \"012c024c-237a-4d01-b79c-68b073836a81\") " pod="kube-system/kube-proxy-8gdm9"
Jan 27 11:52:05 pause-966057 kubelet[2527]: E0127 11:52:05.475574 2527 projected.go:288] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Jan 27 11:52:05 pause-966057 kubelet[2527]: E0127 11:52:05.475646 2527 projected.go:194] Error preparing data for projected volume kube-api-access-6x5dg for pod kube-system/kube-proxy-8gdm9: configmap "kube-root-ca.crt" not found
Jan 27 11:52:05 pause-966057 kubelet[2527]: E0127 11:52:05.476163 2527 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/012c024c-237a-4d01-b79c-68b073836a81-kube-api-access-6x5dg podName:012c024c-237a-4d01-b79c-68b073836a81 nodeName:}" failed. No retries permitted until 2025-01-27 11:52:05.97574411 +0000 UTC m=+5.085380520 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-6x5dg" (UniqueName: "kubernetes.io/projected/012c024c-237a-4d01-b79c-68b073836a81-kube-api-access-6x5dg") pod "kube-proxy-8gdm9" (UID: "012c024c-237a-4d01-b79c-68b073836a81") : configmap "kube-root-ca.crt" not found
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.866849 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz8mp\" (UniqueName: \"kubernetes.io/projected/a8b10abb-2932-4d18-985f-f634e0a62503-kube-api-access-lz8mp\") pod \"coredns-668d6bf9bc-9qph4\" (UID: \"a8b10abb-2932-4d18-985f-f634e0a62503\") " pod="kube-system/coredns-668d6bf9bc-9qph4"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.867258 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8b10abb-2932-4d18-985f-f634e0a62503-config-volume\") pod \"coredns-668d6bf9bc-9qph4\" (UID: \"a8b10abb-2932-4d18-985f-f634e0a62503\") " pod="kube-system/coredns-668d6bf9bc-9qph4"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.867425 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5091afef-2bfa-4383-ba2e-0b420ec97d41-config-volume\") pod \"coredns-668d6bf9bc-jqmxl\" (UID: \"5091afef-2bfa-4383-ba2e-0b420ec97d41\") " pod="kube-system/coredns-668d6bf9bc-jqmxl"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.867575 2527 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fmfh\" (UniqueName: \"kubernetes.io/projected/5091afef-2bfa-4383-ba2e-0b420ec97d41-kube-api-access-8fmfh\") pod \"coredns-668d6bf9bc-jqmxl\" (UID: \"5091afef-2bfa-4383-ba2e-0b420ec97d41\") " pod="kube-system/coredns-668d6bf9bc-jqmxl"
Jan 27 11:52:05 pause-966057 kubelet[2527]: I0127 11:52:05.976656 2527 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 27 11:52:06 pause-966057 kubelet[2527]: I0127 11:52:06.328887 2527 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c401b8594cf85c9a980ed3293dd46e75bef3d370e32a5c614278b9bcab68f8b"
Jan 27 11:52:06 pause-966057 kubelet[2527]: I0127 11:52:06.414149 2527 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="919d8a42975ad71427d1a13036ef8c2468ebf15e3a79a100d7923249f4d2f887"
Jan 27 11:52:07 pause-966057 kubelet[2527]: I0127 11:52:07.442615 2527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-668d6bf9bc-jqmxl" podStartSLOduration=2.442586462 podStartE2EDuration="2.442586462s" podCreationTimestamp="2025-01-27 11:52:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:52:07.442276004 +0000 UTC m=+6.551912416" watchObservedRunningTime="2025-01-27 11:52:07.442586462 +0000 UTC m=+6.552222870"
Jan 27 11:52:07 pause-966057 kubelet[2527]: I0127 11:52:07.454074 2527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-668d6bf9bc-9qph4" podStartSLOduration=2.454047715 podStartE2EDuration="2.454047715s" podCreationTimestamp="2025-01-27 11:52:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:52:07.453670015 +0000 UTC m=+6.563306425" watchObservedRunningTime="2025-01-27 11:52:07.454047715 +0000 UTC m=+6.563684127"
Jan 27 11:52:08 pause-966057 kubelet[2527]: I0127 11:52:08.458536 2527 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 11:52:08 pause-966057 kubelet[2527]: I0127 11:52:08.458561 2527 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 11:52:10 pause-966057 kubelet[2527]: I0127 11:52:10.370700 2527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-8gdm9" podStartSLOduration=5.370675251 podStartE2EDuration="5.370675251s" podCreationTimestamp="2025-01-27 11:52:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-01-27 11:52:07.464907957 +0000 UTC m=+6.574544369" watchObservedRunningTime="2025-01-27 11:52:10.370675251 +0000 UTC m=+9.480311672"
Jan 27 11:52:11 pause-966057 kubelet[2527]: I0127 11:52:11.203255 2527 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 11:52:11 pause-966057 kubelet[2527]: I0127 11:52:11.689561 2527 kuberuntime_manager.go:1702] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Jan 27 11:52:11 pause-966057 kubelet[2527]: I0127 11:52:11.690410 2527 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Jan 27 11:52:15 pause-966057 kubelet[2527]: I0127 11:52:15.923019 2527 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-966057 -n pause-966057
helpers_test.go:261: (dbg) Run: kubectl --context pause-966057 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestPause/serial/Start FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestPause/serial/Start (901.78s)