=== RUN TestFunctionalNewestKubernetes/Versionv1.34.0-beta.0/serial/StartWithProxy
functional_test.go:2251: (dbg) Run: out/minikube-linux-amd64 start -p functional-699837 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.34.0-beta.0
E0804 08:46:47.353392 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/addons-309866/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:49:03.491491 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/addons-309866/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:49:31.201887 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/addons-309866/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.685369 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.691720 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.703007 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.724323 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.765670 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:41.847089 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:42.008623 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:42.330351 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:42.972392 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:44.254129 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:46.816978 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:50:51.938491 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:51:02.180422 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:51:22.661786 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:52:03.623930 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:53:25.545446 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/functional-114794/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0804 08:54:03.491925 1582690 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21223-1578987/.minikube/profiles/addons-309866/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
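[editor note] The cert_rotation errors above come from kubeconfig entries that still reference client certificates of already-deleted profiles (addons-309866, functional-114794); they are noise relative to this failure. A minimal sketch for locating the stale entries, assuming nothing beyond the paths printed above:
# Find kubeconfig references to the deleted profiles' client certs.
grep -n 'addons-309866\|functional-114794' \
  /home/jenkins/minikube-integration/21223-1578987/kubeconfig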
functional_test.go:2251: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-699837 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.34.0-beta.0: exit status 80 (8m39.407126006s)
-- stdout --
* [functional-699837] minikube v1.36.0 on Ubuntu 20.04 (kvm/amd64)
- MINIKUBE_LOCATION=21223
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/21223-1578987/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/21223-1578987/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the docker driver based on user configuration
* Using Docker driver with root privileges
* Starting "functional-699837" primary control-plane node in "functional-699837" cluster
* Pulling base image v0.0.47-1753871403-21198 ...
* Creating docker container (CPUs=2, Memory=4096MB) ...
* Found network options:
- HTTP_PROXY=localhost:38447
* Please see https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/ for more details
* Preparing Kubernetes v1.34.0-beta.0 on Docker 28.3.3 ...
  - Generating certificates and keys ...
  - Booting up control plane ...
  - Generating certificates and keys ...
  - Booting up control plane ...
-- /stdout --
** stderr **
! Local proxy ignored: not passing HTTP_PROXY=localhost:38447 to docker env.
! Local proxy ignored: not passing HTTP_PROXY=localhost:38447 to docker env.
! You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (192.168.49.2).
! initialization failed, will try again: wait: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0-beta.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.34.0-beta.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1083-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [functional-699837 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [functional-699837 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 1.001773586s
[control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
[control-plane-check] Checking kube-apiserver at https://192.168.49.2:8441/livez
[control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
[control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
[control-plane-check] kube-controller-manager is healthy after 2.545633246s
[control-plane-check] kube-scheduler is healthy after 33.512654334s
[control-plane-check] kube-apiserver is not healthy after 4m0.000406592s
A control plane component may have crashed or exited when started by the container runtime.
To troubleshoot, list all containers using your preferred container runtimes CLI.
Here is one example how you may list all running Kubernetes containers by using crictl:
- 'crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock ps -a | grep kube | grep -v pause'
Once you have found the failing container, you can inspect its logs with:
- 'crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock logs CONTAINERID'
stderr:
[WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1083-gcp\n", err: exit status 1
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the control plane to start: kube-apiserver check failed at https://192.168.49.2:8441/livez: Get "https://control-plane.minikube.internal:8441/livez?timeout=10s": dial tcp 192.168.49.2:8441: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
*
X Error starting cluster: wait: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0-beta.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.34.0-beta.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1083-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 501.731399ms
[control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
[control-plane-check] Checking kube-apiserver at https://192.168.49.2:8441/livez
[control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
[control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
[control-plane-check] kube-controller-manager is healthy after 3.505015094s
[control-plane-check] kube-scheduler is healthy after 33.41794123s
[control-plane-check] kube-apiserver is not healthy after 4m0.000473142s
A control plane component may have crashed or exited when started by the container runtime.
To troubleshoot, list all containers using your preferred container runtimes CLI.
Here is one example how you may list all running Kubernetes containers by using crictl:
- 'crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock ps -a | grep kube | grep -v pause'
Once you have found the failing container, you can inspect its logs with:
- 'crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock logs CONTAINERID'
stderr:
[WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1083-gcp\n", err: exit status 1
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the control plane to start: kube-apiserver check failed at https://192.168.49.2:8441/livez: client rate limiter Wait returned an error: context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
*
X Exiting due to GUEST_START: failed to start node: wait: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0-beta.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
*
╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
** /stderr **
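[editor note] The proxy warnings in the stderr above match the linked vpn_and_proxy guidance: the localhost proxy is not forwarded into the node, and NO_PROXY does not cover the minikube IP (192.168.49.2). A sketch of an environment that would satisfy the warning, assuming the docker driver's default 192.168.49.0/24 subnet seen in this run:
# Exclude the minikube network from the local proxy before starting.
export HTTP_PROXY=localhost:38447
export NO_PROXY=localhost,127.0.0.1,192.168.49.0/24
out/minikube-linux-amd64 start -p functional-699837 --driver=docker --container-runtime=docker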
functional_test.go:2253: failed minikube start. args "out/minikube-linux-amd64 start -p functional-699837 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.34.0-beta.0": exit status 80
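[editor note] kubeadm's own hint above is the next diagnostic step: pull the kube-apiserver container logs from inside the node over minikube ssh. A sketch, with CONTAINERID standing in for whatever ID the first command returns:
# List kube containers inside the node, then inspect the failing one's logs.
out/minikube-linux-amd64 -p functional-699837 ssh -- "sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock ps -a | grep kube | grep -v pause"
out/minikube-linux-amd64 -p functional-699837 ssh -- "sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock logs CONTAINERID"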
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.34.0-beta.0/serial/StartWithProxy]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect functional-699837
helpers_test.go:235: (dbg) docker inspect functional-699837:
-- stdout --
[
{
"Id": "c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef",
"Created": "2025-08-04T08:46:45.45274172Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1645232,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-08-04T08:46:45.480784715Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:da3843d6394f34289e593ae899877bec769ea93dbd69d427e43ba72c57cff8a2",
"ResolvConfPath": "/var/lib/docker/containers/c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef/hostname",
"HostsPath": "/var/lib/docker/containers/c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef/hosts",
"LogPath": "/var/lib/docker/containers/c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef/c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef-json.log",
"Name": "/functional-699837",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"functional-699837:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "functional-699837",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4294967296,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8589934592,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "c369b96e23d5b41fbe502377870d491580cb85c5215f8441347e14f0e4bc37ef",
"LowerDir": "/var/lib/docker/overlay2/328952bd765245f57c2eaa05b0bd7cdbe686ae38a32f149eefbc775cdfc03252-init/diff:/var/lib/docker/overlay2/14186d2bed6bdd9b20ff44dd2ed07ccdaf1758422566a466fa49e19085ed482d/diff",
"MergedDir": "/var/lib/docker/overlay2/328952bd765245f57c2eaa05b0bd7cdbe686ae38a32f149eefbc775cdfc03252/merged",
"UpperDir": "/var/lib/docker/overlay2/328952bd765245f57c2eaa05b0bd7cdbe686ae38a32f149eefbc775cdfc03252/diff",
"WorkDir": "/var/lib/docker/overlay2/328952bd765245f57c2eaa05b0bd7cdbe686ae38a32f149eefbc775cdfc03252/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "functional-699837",
"Source": "/var/lib/docker/volumes/functional-699837/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "functional-699837",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8441/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.47-1753871403-21198@sha256:df7d018c3a6a26c5bb83a41102cf6ee056f62471011edba5d602d02edb5f5d1d",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "functional-699837",
"name.minikube.sigs.k8s.io": "functional-699837",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "28a81d3856c88da8c1d30d5c1cccd74ba2a899c3397b78caf0ac9da484142038",
"SandboxKey": "/var/run/docker/netns/28a81d3856c8",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32783"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32784"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32787"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32785"
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32786"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"functional-699837": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "be:c5:9a:18:f2:69",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "763070d9e7bba0803db69bf71eb608d56921d0bfd4c71a1d39d0701f7372b87c",
"EndpointID": "83493e8c17b59326d8c479c2c0d7a5ded2cae3362a881c1ce8347b3f751ead15",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"functional-699837",
"c369b96e23d5"
]
}
}
}
}
]
-- /stdout --
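[editor note] Only a few fields of the inspect dump bear on the /livez failure: the node's address on the cluster network and the host port mapped to 8441/tcp. A sketch of extracting just those with Go templates (names taken from this run):
# Node IP (expected 192.168.49.2) and the host side of 8441/tcp (32786 here).
docker inspect -f '{{(index .NetworkSettings.Networks "functional-699837").IPAddress}}' functional-699837
docker inspect -f '{{(index .NetworkSettings.Ports "8441/tcp" 0).HostPort}}' functional-699837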
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-699837 -n functional-699837
helpers_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p functional-699837 -n functional-699837: exit status 6 (266.501474ms)
-- stdout --
Running
WARNING: Your kubectl is pointing to stale minikube-vm.
To fix the kubectl context, run `minikube update-context`
-- /stdout --
** stderr **
E0804 08:55:20.712428 1653572 status.go:458] kubeconfig endpoint: get endpoint: "functional-699837" does not appear in /home/jenkins/minikube-integration/21223-1578987/kubeconfig
** /stderr **
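[editor note] The status output names its own repair for the stale kubeconfig entry; a sketch, noting that in this run the apiserver never became healthy, so status would remain degraded even after the context is fixed:
# Rewrite the kubeconfig entry for this profile, then verify.
out/minikube-linux-amd64 -p functional-699837 update-context
kubectl config current-context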
helpers_test.go:239: status error: exit status 6 (may be ok)
helpers_test.go:241: "functional-699837" host is not running, skipping log retrieval (state="Running\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`")
--- FAIL: TestFunctionalNewestKubernetes/Versionv1.34.0-beta.0/serial/StartWithProxy (519.70s)