=== RUN TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy
functional_test.go:2244: (dbg) Run: out/minikube-linux-amd64 start -p functional-384766 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.35.0-rc.1
E1222 22:43:14.878966 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/addons-268945/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:45:31.033066 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/addons-268945/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:45:58.726253 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/addons-268945/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.659414 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.668202 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.679130 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.699439 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.739745 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.820130 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:30.980631 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:31.301255 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:31.942273 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:33.222787 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:35.784552 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:40.905277 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:46:51.146275 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:47:11.627041 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:47:52.588104 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:49:14.510812 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-580825/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1222 22:50:31.033633 75803 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/addons-268945/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:2244: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-384766 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.35.0-rc.1: exit status 109 (8m16.472465193s)
-- stdout --
* [functional-384766] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
- MINIKUBE_LOCATION=22301
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/22301-72233/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/22301-72233/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the docker driver based on user configuration
* Using Docker driver with root privileges
* Starting "functional-384766" primary control-plane node in "functional-384766" cluster
* Pulling base image v0.0.48-1766394456-22288 ...
* Found network options:
- HTTP_PROXY=localhost:34075
* Please see https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/ for more details
-- /stdout --
** stderr **
! Local proxy ignored: not passing HTTP_PROXY=localhost:34075 to docker env.
! Local proxy ignored: not passing HTTP_PROXY=localhost:34075 to docker env.
! You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (192.168.49.2).
! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001109614s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
*
X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000802765s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
*
╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
X Exiting due to K8S_KUBELET_NOT_RUNNING: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000802765s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
* Suggestion: Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start
* Related issue: https://github.com/kubernetes/minikube/issues/4172
** /stderr **
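The stderr above surfaces two distinct problems: the host proxy settings do not cover the minikube IP, and kubeadm's wait-control-plane phase times out because the kubelet never becomes healthy. A minimal retry sketch based only on the hints minikube itself prints above (the NO_PROXY value and the --extra-config flag come from the warning and suggestion in this log, and are not verified to fix this particular run):

    export NO_PROXY="localhost,127.0.0.1,192.168.49.0/24"    # cover the minikube subnet flagged in the proxy warning
    out/minikube-linux-amd64 delete -p functional-384766
    out/minikube-linux-amd64 start -p functional-384766 --memory=4096 --apiserver-port=8441 \
        --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.35.0-rc.1 \
        --extra-config=kubelet.cgroup-driver=systemd          # suggestion printed by minikube above

If the kubelet still fails to come up, the checks kubeadm recommends above can be run inside the node, e.g. out/minikube-linux-amd64 ssh -p functional-384766 "sudo journalctl -xeu kubelet" and, from within the node, curl -sSL http://127.0.0.1:10248/healthz.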
functional_test.go:2246: failed minikube start. args "out/minikube-linux-amd64 start -p functional-384766 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.35.0-rc.1": exit status 109
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:239: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy]: docker inspect <======
helpers_test.go:240: (dbg) Run: docker inspect functional-384766
helpers_test.go:244: (dbg) docker inspect functional-384766:
-- stdout --
[
{
"Id": "e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c",
"Created": "2025-12-22T22:43:03.818900502Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 134904,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-12-22T22:43:03.847527913Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:9a87e850a5e640dd3e5f71477885272b970ba271e3722be8bebbe0157f704ffd",
"ResolvConfPath": "/var/lib/docker/containers/e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c/hostname",
"HostsPath": "/var/lib/docker/containers/e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c/hosts",
"LogPath": "/var/lib/docker/containers/e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c/e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c-json.log",
"Name": "/functional-384766",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"functional-384766:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "functional-384766",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4294967296,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8589934592,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "e126b999cc063ee0a68492e79491a8674b8fc6008cc067cb30902412e51fc42c",
"LowerDir": "/var/lib/docker/overlay2/3e3d10c0ae87018d46767d6a2bb62611a8b9a288f6938e75c60f3cd57119d4bf-init/diff:/var/lib/docker/overlay2/c57dd1a41102d99c4ed6be3c60b871435428bd2cea6a3d8d172f0a67527ba009/diff",
"MergedDir": "/var/lib/docker/overlay2/3e3d10c0ae87018d46767d6a2bb62611a8b9a288f6938e75c60f3cd57119d4bf/merged",
"UpperDir": "/var/lib/docker/overlay2/3e3d10c0ae87018d46767d6a2bb62611a8b9a288f6938e75c60f3cd57119d4bf/diff",
"WorkDir": "/var/lib/docker/overlay2/3e3d10c0ae87018d46767d6a2bb62611a8b9a288f6938e75c60f3cd57119d4bf/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "functional-384766",
"Source": "/var/lib/docker/volumes/functional-384766/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "functional-384766",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8441/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "functional-384766",
"name.minikube.sigs.k8s.io": "functional-384766",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "d6f65d275ad1e1cfaea153f23b0c094464e089c27de9a12387045fa2c863e00e",
"SandboxKey": "/var/run/docker/netns/d6f65d275ad1",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32783"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32784"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32787"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32785"
}
],
"8441/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32786"
}
]
},
"Networks": {
"functional-384766": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "1b177601c4f3a252e4feb1553da3a4110e40d5b9ed2bd5de6789f2bc9f8f5c2b",
"EndpointID": "2c787f98c5d836612c102f7592dc2eccfef09327c2a6cadf1319fd6559b5eca8",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"MacAddress": "d6:90:04:78:9b:e3",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"functional-384766",
"e126b999cc06"
]
}
}
}
}
]
-- /stdout --
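The full docker inspect dump above is what the post-mortem helper captures. When re-checking the same state by hand, a filtered inspect (a sketch using docker's Go-template --format flag, not part of the test harness) reduces it to the fields that matter here: container status, node IP, and the host port mapped to the apiserver port 8441:

    docker inspect -f '{{.State.Status}} {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' functional-384766
    docker inspect -f '{{(index (index .NetworkSettings.Ports "8441/tcp") 0).HostPort}}' functional-384766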
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-384766 -n functional-384766
helpers_test.go:248: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p functional-384766 -n functional-384766: exit status 6 (299.036727ms)
-- stdout --
Running
WARNING: Your kubectl is pointing to stale minikube-vm.
To fix the kubectl context, run `minikube update-context`
-- /stdout --
** stderr **
E1222 22:51:16.570194 146268 status.go:458] kubeconfig endpoint: get endpoint: "functional-384766" does not appear in /home/jenkins/minikube-integration/22301-72233/kubeconfig
** /stderr **
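The status output above reports that kubectl points at a stale context, and the stderr confirms the "functional-384766" endpoint was never written to the kubeconfig because the start failed. The remedy the stdout itself suggests, sketched here for completeness (it only helps once a start has actually written the profile):

    out/minikube-linux-amd64 -p functional-384766 update-context
    kubectl config current-context    # expected to report functional-384766 once the profile exists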
helpers_test.go:248: status error: exit status 6 (may be ok)
helpers_test.go:253: <<< TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p functional-384766 logs -n 25
helpers_test.go:261: TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy logs:
-- stdout --
==> Audit <==
┌────────────────┬──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────────┼──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ service │ functional-580825 service hello-node --url │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ update-context │ functional-580825 update-context --alsologtostderr -v=2 │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ update-context │ functional-580825 update-context --alsologtostderr -v=2 │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ update-context │ functional-580825 update-context --alsologtostderr -v=2 │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image load --daemon ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image load --daemon ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image load --daemon ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image save ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 /home/jenkins/workspace/Docker_Linux_integration/echo-server-save.tar --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image rm ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image load /home/jenkins/workspace/Docker_Linux_integration/echo-server-save.tar --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image save --daemon ghcr.io/medyagh/image-mirrors/kicbase/echo-server:functional-580825 --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ ssh │ functional-580825 ssh pgrep buildkitd │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ │
│ image │ functional-580825 image ls --format json --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls --format short --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ │
│ image │ functional-580825 image ls --format yaml --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls --format table --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image build -t localhost/my-image:functional-580825 testdata/build --alsologtostderr │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ image │ functional-580825 image ls │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ delete │ -p functional-580825 │ functional-580825 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ 22 Dec 25 22:42 UTC │
│ start │ -p functional-384766 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker --kubernetes-version=v1.35.0-rc.1 │ functional-384766 │ jenkins │ v1.37.0 │ 22 Dec 25 22:42 UTC │ │
└────────────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/22 22:42:59
Running on machine: ubuntu-20-agent-5
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1222 22:42:59.842390 134327 out.go:360] Setting OutFile to fd 1 ...
I1222 22:42:59.842986 134327 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1222 22:42:59.843001 134327 out.go:374] Setting ErrFile to fd 2...
I1222 22:42:59.843007 134327 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1222 22:42:59.843511 134327 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22301-72233/.minikube/bin
I1222 22:42:59.844291 134327 out.go:368] Setting JSON to false
I1222 22:42:59.845173 134327 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-5","uptime":8720,"bootTime":1766434660,"procs":187,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1222 22:42:59.845246 134327 start.go:143] virtualization: kvm guest
I1222 22:42:59.846903 134327 out.go:179] * [functional-384766] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1222 22:42:59.848336 134327 out.go:179] - MINIKUBE_LOCATION=22301
I1222 22:42:59.848434 134327 notify.go:221] Checking for updates...
I1222 22:42:59.850419 134327 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1222 22:42:59.851606 134327 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22301-72233/kubeconfig
I1222 22:42:59.852679 134327 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22301-72233/.minikube
I1222 22:42:59.853888 134327 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1222 22:42:59.855048 134327 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1222 22:42:59.856385 134327 driver.go:422] Setting default libvirt URI to qemu:///system
I1222 22:42:59.881657 134327 docker.go:124] docker version: linux-29.1.3:Docker Engine - Community
I1222 22:42:59.881722 134327 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1222 22:42:59.935838 134327 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:44 SystemTime:2025-12-22 22:42:59.92621169 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1045-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x8
6_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-5 Labels:[] ExperimentalBuild:false ServerVersion:29.1.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:dea7da592f5d1d2b7755e3a161be07f43fad8f75 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: Support for cgroup v1 is deprecated and planned to be removed
by no later than May 2029 (https://github.com/moby/moby/issues/51111)] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v5.0.0] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.6] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1222 22:42:59.935934 134327 docker.go:319] overlay module found
I1222 22:42:59.937690 134327 out.go:179] * Using the docker driver based on user configuration
I1222 22:42:59.938762 134327 start.go:309] selected driver: docker
I1222 22:42:59.938768 134327 start.go:928] validating driver "docker" against <nil>
I1222 22:42:59.938777 134327 start.go:939] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1222 22:42:59.939672 134327 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1222 22:42:59.998039 134327 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:true NGoroutines:44 SystemTime:2025-12-22 22:42:59.988941385 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1045-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-5 Labels:[] ExperimentalBuild:false ServerVersion:29.1.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:dea7da592f5d1d2b7755e3a161be07f43fad8f75 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: Support for cgroup v1 is deprecated and planned to be remove
d by no later than May 2029 (https://github.com/moby/moby/issues/51111)] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v5.0.0] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.6] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1222 22:42:59.998172 134327 start_flags.go:333] no existing cluster config was found, will generate one from the flags
I1222 22:42:59.998379 134327 start_flags.go:1019] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1222 22:43:00.000073 134327 out.go:179] * Using Docker driver with root privileges
I1222 22:43:00.001318 134327 cni.go:84] Creating CNI manager for ""
I1222 22:43:00.001382 134327 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1222 22:43:00.001392 134327 start_flags.go:342] Found "bridge CNI" CNI - setting NetworkPlugin=cni
W1222 22:43:00.001462 134327 out.go:285] ! Local proxy ignored: not passing HTTP_PROXY=localhost:34075 to docker env.
I1222 22:43:00.001540 134327 start.go:353] cluster config:
{Name:functional-384766 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-384766 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Con
tainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSoc
k: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1222 22:43:00.002818 134327 out.go:179] * Starting "functional-384766" primary control-plane node in "functional-384766" cluster
I1222 22:43:00.003840 134327 cache.go:134] Beginning downloading kic base image for docker with docker
I1222 22:43:00.004860 134327 out.go:179] * Pulling base image v0.0.48-1766394456-22288 ...
I1222 22:43:00.005907 134327 preload.go:188] Checking if preload exists for k8s version v1.35.0-rc.1 and runtime docker
I1222 22:43:00.005930 134327 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22301-72233/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4
I1222 22:43:00.005942 134327 cache.go:65] Caching tarball of preloaded images
I1222 22:43:00.006017 134327 preload.go:251] Found /home/jenkins/minikube-integration/22301-72233/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1222 22:43:00.006023 134327 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0-rc.1 on docker
I1222 22:43:00.006026 134327 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 in local docker daemon
I1222 22:43:00.006340 134327 profile.go:143] Saving config to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/config.json ...
I1222 22:43:00.006358 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/config.json: {Name:mk103cffb42129f0ed4cacda0289d1119f019236 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:00.026068 134327 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 in local docker daemon, skipping pull
I1222 22:43:00.026078 134327 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 exists in daemon, skipping load
I1222 22:43:00.026091 134327 cache.go:243] Successfully downloaded all kic artifacts
I1222 22:43:00.026123 134327 start.go:360] acquireMachinesLock for functional-384766: {Name:mk956fe60c71d3d96aa218ecf73d6e39f6ab1bf3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1222 22:43:00.026210 134327 start.go:364] duration metric: took 74.871µs to acquireMachinesLock for "functional-384766"
I1222 22:43:00.026228 134327 start.go:93] Provisioning new machine with config: &{Name:functional-384766 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-384766 Namespace:default APIServerHAVIP: APIS
erverName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} &{Name: IP: Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1222 22:43:00.026285 134327 start.go:125] createHost starting for "" (driver="docker")
I1222 22:43:00.028019 134327 out.go:252] * Creating docker container (CPUs=2, Memory=4096MB) ...
W1222 22:43:00.028335 134327 out.go:285] ! Local proxy ignored: not passing HTTP_PROXY=localhost:34075 to docker env.
I1222 22:43:00.028357 134327 start.go:159] libmachine.API.Create for "functional-384766" (driver="docker")
I1222 22:43:00.028377 134327 client.go:173] LocalClient.Create starting
I1222 22:43:00.028467 134327 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem
I1222 22:43:00.028500 134327 main.go:144] libmachine: Decoding PEM data...
I1222 22:43:00.028522 134327 main.go:144] libmachine: Parsing certificate...
I1222 22:43:00.028625 134327 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22301-72233/.minikube/certs/cert.pem
I1222 22:43:00.028651 134327 main.go:144] libmachine: Decoding PEM data...
I1222 22:43:00.028665 134327 main.go:144] libmachine: Parsing certificate...
I1222 22:43:00.029099 134327 cli_runner.go:164] Run: docker network inspect functional-384766 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1222 22:43:00.046212 134327 cli_runner.go:211] docker network inspect functional-384766 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1222 22:43:00.046286 134327 network_create.go:284] running [docker network inspect functional-384766] to gather additional debugging logs...
I1222 22:43:00.046302 134327 cli_runner.go:164] Run: docker network inspect functional-384766
W1222 22:43:00.061994 134327 cli_runner.go:211] docker network inspect functional-384766 returned with exit code 1
I1222 22:43:00.062026 134327 network_create.go:287] error running [docker network inspect functional-384766]: docker network inspect functional-384766: exit status 1
stdout:
[]
stderr:
Error response from daemon: network functional-384766 not found
I1222 22:43:00.062040 134327 network_create.go:289] output of [docker network inspect functional-384766]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network functional-384766 not found
** /stderr **
I1222 22:43:00.062138 134327 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1222 22:43:00.078959 134327 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001c74990}
I1222 22:43:00.078984 134327 network_create.go:124] attempt to create docker network functional-384766 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I1222 22:43:00.079022 134327 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=functional-384766 functional-384766
I1222 22:43:00.124783 134327 network_create.go:108] docker network functional-384766 192.168.49.0/24 created
I1222 22:43:00.124803 134327 kic.go:121] calculated static IP "192.168.49.2" for the "functional-384766" container
I1222 22:43:00.124884 134327 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1222 22:43:00.141158 134327 cli_runner.go:164] Run: docker volume create functional-384766 --label name.minikube.sigs.k8s.io=functional-384766 --label created_by.minikube.sigs.k8s.io=true
I1222 22:43:00.158715 134327 oci.go:103] Successfully created a docker volume functional-384766
I1222 22:43:00.158781 134327 cli_runner.go:164] Run: docker run --rm --name functional-384766-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=functional-384766 --entrypoint /usr/bin/test -v functional-384766:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 -d /var/lib
I1222 22:43:00.536843 134327 oci.go:107] Successfully prepared a docker volume functional-384766
I1222 22:43:00.536916 134327 preload.go:188] Checking if preload exists for k8s version v1.35.0-rc.1 and runtime docker
I1222 22:43:00.536925 134327 kic.go:194] Starting extracting preloaded images to volume ...
I1222 22:43:00.536991 134327 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22301-72233/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v functional-384766:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 -I lz4 -xf /preloaded.tar -C /extractDir
I1222 22:43:03.750255 134327 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22301-72233/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v functional-384766:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 -I lz4 -xf /preloaded.tar -C /extractDir: (3.213205309s)
I1222 22:43:03.750283 134327 kic.go:203] duration metric: took 3.213354248s to extract preloaded images to volume ...
W1222 22:43:03.750455 134327 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1222 22:43:03.750553 134327 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1222 22:43:03.802914 134327 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname functional-384766 --name functional-384766 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=functional-384766 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=functional-384766 --network functional-384766 --ip 192.168.49.2 --volume functional-384766:/var --security-opt apparmor=unconfined --memory=4096mb --cpus=2 -e container=docker --expose 8441 --publish=127.0.0.1::8441 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484
I1222 22:43:04.057295 134327 cli_runner.go:164] Run: docker container inspect functional-384766 --format={{.State.Running}}
I1222 22:43:04.075762 134327 cli_runner.go:164] Run: docker container inspect functional-384766 --format={{.State.Status}}
I1222 22:43:04.095685 134327 cli_runner.go:164] Run: docker exec functional-384766 stat /var/lib/dpkg/alternatives/iptables
I1222 22:43:04.146231 134327 oci.go:144] the created container "functional-384766" has a running status.
I1222 22:43:04.146269 134327 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa...
I1222 22:43:04.254326 134327 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1222 22:43:04.278215 134327 cli_runner.go:164] Run: docker container inspect functional-384766 --format={{.State.Status}}
I1222 22:43:04.296073 134327 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1222 22:43:04.296086 134327 kic_runner.go:114] Args: [docker exec --privileged functional-384766 chown docker:docker /home/docker/.ssh/authorized_keys]
I1222 22:43:04.344786 134327 cli_runner.go:164] Run: docker container inspect functional-384766 --format={{.State.Status}}
I1222 22:43:04.363524 134327 machine.go:94] provisionDockerMachine start ...
I1222 22:43:04.363626 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:04.381571 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:04.381878 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:04.381888 134327 main.go:144] libmachine: About to run SSH command:
hostname
I1222 22:43:04.382785 134327 main.go:144] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:34832->127.0.0.1:32783: read: connection reset by peer
I1222 22:43:07.526439 134327 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-384766
I1222 22:43:07.526469 134327 ubuntu.go:182] provisioning hostname "functional-384766"
I1222 22:43:07.526536 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:07.545948 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:07.546193 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:07.546212 134327 main.go:144] libmachine: About to run SSH command:
sudo hostname functional-384766 && echo "functional-384766" | sudo tee /etc/hostname
I1222 22:43:07.695866 134327 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-384766
I1222 22:43:07.695924 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:07.712544 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:07.712852 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:07.712871 134327 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-384766' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-384766/g' /etc/hosts;
else
echo '127.0.1.1 functional-384766' | sudo tee -a /etc/hosts;
fi
fi
I1222 22:43:07.854099 134327 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1222 22:43:07.854127 134327 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22301-72233/.minikube CaCertPath:/home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22301-72233/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22301-72233/.minikube}
I1222 22:43:07.854172 134327 ubuntu.go:190] setting up certificates
I1222 22:43:07.854186 134327 provision.go:84] configureAuth start
I1222 22:43:07.854262 134327 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-384766
I1222 22:43:07.871910 134327 provision.go:143] copyHostCerts
I1222 22:43:07.871972 134327 exec_runner.go:144] found /home/jenkins/minikube-integration/22301-72233/.minikube/key.pem, removing ...
I1222 22:43:07.871980 134327 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22301-72233/.minikube/key.pem
I1222 22:43:07.872058 134327 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22301-72233/.minikube/key.pem (1679 bytes)
I1222 22:43:07.872160 134327 exec_runner.go:144] found /home/jenkins/minikube-integration/22301-72233/.minikube/ca.pem, removing ...
I1222 22:43:07.872165 134327 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22301-72233/.minikube/ca.pem
I1222 22:43:07.872195 134327 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22301-72233/.minikube/ca.pem (1082 bytes)
I1222 22:43:07.872271 134327 exec_runner.go:144] found /home/jenkins/minikube-integration/22301-72233/.minikube/cert.pem, removing ...
I1222 22:43:07.872275 134327 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22301-72233/.minikube/cert.pem
I1222 22:43:07.872301 134327 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22301-72233/.minikube/cert.pem (1123 bytes)
I1222 22:43:07.872409 134327 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22301-72233/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca-key.pem org=jenkins.functional-384766 san=[127.0.0.1 192.168.49.2 functional-384766 localhost minikube]
I1222 22:43:08.036521 134327 provision.go:177] copyRemoteCerts
I1222 22:43:08.036586 134327 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1222 22:43:08.036675 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:08.054544 134327 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa Username:docker}
I1222 22:43:08.155832 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1222 22:43:08.175219 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1222 22:43:08.192227 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1222 22:43:08.209514 134327 provision.go:87] duration metric: took 355.315794ms to configureAuth
I1222 22:43:08.209533 134327 ubuntu.go:206] setting minikube options for container-runtime
I1222 22:43:08.209748 134327 config.go:182] Loaded profile config "functional-384766": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0-rc.1
I1222 22:43:08.209796 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:08.227145 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:08.227353 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:08.227358 134327 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1222 22:43:08.369310 134327 main.go:144] libmachine: SSH cmd err, output: <nil>: overlay
I1222 22:43:08.369322 134327 ubuntu.go:71] root file system type: overlay
I1222 22:43:08.369445 134327 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1222 22:43:08.369495 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:08.386966 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:08.387189 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:08.387245 134327 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1222 22:43:08.539683 134327 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1222 22:43:08.539748 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:08.558294 134327 main.go:144] libmachine: Using SSH client type: native
I1222 22:43:08.558506 134327 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84da00] 0x8506a0 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I1222 22:43:08.558520 134327 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1222 22:43:09.658984 134327 main.go:144] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-12-12 14:48:15.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-12-22 22:43:08.537193298 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I1222 22:43:09.659006 134327 machine.go:97] duration metric: took 5.295469546s to provisionDockerMachine
I1222 22:43:09.659016 134327 client.go:176] duration metric: took 9.630634498s to LocalClient.Create
I1222 22:43:09.659035 134327 start.go:167] duration metric: took 9.630678208s to libmachine.API.Create "functional-384766"
I1222 22:43:09.659043 134327 start.go:293] postStartSetup for "functional-384766" (driver="docker")
I1222 22:43:09.659056 134327 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1222 22:43:09.659106 134327 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1222 22:43:09.659184 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:09.676234 134327 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa Username:docker}
I1222 22:43:09.779722 134327 ssh_runner.go:195] Run: cat /etc/os-release
I1222 22:43:09.783227 134327 main.go:144] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1222 22:43:09.783244 134327 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1222 22:43:09.783255 134327 filesync.go:126] Scanning /home/jenkins/minikube-integration/22301-72233/.minikube/addons for local assets ...
I1222 22:43:09.783329 134327 filesync.go:126] Scanning /home/jenkins/minikube-integration/22301-72233/.minikube/files for local assets ...
I1222 22:43:09.783425 134327 filesync.go:149] local asset: /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/ssl/certs/758032.pem -> 758032.pem in /etc/ssl/certs
I1222 22:43:09.783521 134327 filesync.go:149] local asset: /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/test/nested/copy/75803/hosts -> hosts in /etc/test/nested/copy/75803
I1222 22:43:09.783569 134327 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/75803
I1222 22:43:09.791324 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/ssl/certs/758032.pem --> /etc/ssl/certs/758032.pem (1708 bytes)
I1222 22:43:09.810812 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/test/nested/copy/75803/hosts --> /etc/test/nested/copy/75803/hosts (40 bytes)
I1222 22:43:09.827873 134327 start.go:296] duration metric: took 168.81655ms for postStartSetup
I1222 22:43:09.828226 134327 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-384766
I1222 22:43:09.845546 134327 profile.go:143] Saving config to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/config.json ...
I1222 22:43:09.845802 134327 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1222 22:43:09.845857 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:09.862738 134327 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa Username:docker}
I1222 22:43:09.961157 134327 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1222 22:43:09.966010 134327 start.go:128] duration metric: took 9.939709767s to createHost
I1222 22:43:09.966047 134327 start.go:83] releasing machines lock for "functional-384766", held for 9.939811396s
I1222 22:43:09.966125 134327 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-384766
I1222 22:43:09.984820 134327 out.go:179] * Found network options:
I1222 22:43:09.986317 134327 out.go:179] - HTTP_PROXY=localhost:34075
W1222 22:43:09.987447 134327 out.go:285] ! You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (192.168.49.2).
I1222 22:43:09.988617 134327 out.go:179] * Please see https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/ for more details
I1222 22:43:09.989650 134327 ssh_runner.go:195] Run: cat /version.json
I1222 22:43:09.989683 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:09.989748 134327 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1222 22:43:09.989803 134327 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-384766
I1222 22:43:10.008991 134327 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa Username:docker}
I1222 22:43:10.009302 134327 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/22301-72233/.minikube/machines/functional-384766/id_rsa Username:docker}
I1222 22:43:10.163176 134327 ssh_runner.go:195] Run: systemctl --version
I1222 22:43:10.169754 134327 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1222 22:43:10.174094 134327 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1222 22:43:10.174136 134327 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1222 22:43:10.197824 134327 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1222 22:43:10.197845 134327 start.go:496] detecting cgroup driver to use...
I1222 22:43:10.197875 134327 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1222 22:43:10.197983 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1222 22:43:10.211554 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1222 22:43:10.221448 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1222 22:43:10.229443 134327 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1222 22:43:10.229490 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1222 22:43:10.237838 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1222 22:43:10.245745 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1222 22:43:10.253441 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1222 22:43:10.261191 134327 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1222 22:43:10.268514 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1222 22:43:10.276423 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1222 22:43:10.284385 134327 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1222 22:43:10.292740 134327 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1222 22:43:10.299472 134327 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1222 22:43:10.306266 134327 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1222 22:43:10.387115 134327 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1222 22:43:10.458229 134327 start.go:496] detecting cgroup driver to use...
I1222 22:43:10.458267 134327 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1222 22:43:10.458329 134327 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1222 22:43:10.471348 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1222 22:43:10.482805 134327 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1222 22:43:10.502905 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1222 22:43:10.514464 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1222 22:43:10.525950 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1222 22:43:10.539182 134327 ssh_runner.go:195] Run: which cri-dockerd
I1222 22:43:10.542645 134327 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1222 22:43:10.551212 134327 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1222 22:43:10.563822 134327 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1222 22:43:10.643708 134327 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1222 22:43:10.722977 134327 docker.go:578] configuring docker to use "cgroupfs" as cgroup driver...
I1222 22:43:10.723079 134327 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1222 22:43:10.736218 134327 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1222 22:43:10.747889 134327 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1222 22:43:10.822668 134327 ssh_runner.go:195] Run: sudo systemctl restart docker
I1222 22:43:11.487017 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1222 22:43:11.499323 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1222 22:43:11.511496 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1222 22:43:11.523269 134327 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1222 22:43:11.606371 134327 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1222 22:43:11.686203 134327 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1222 22:43:11.776030 134327 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1222 22:43:11.801947 134327 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1222 22:43:11.813698 134327 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1222 22:43:11.896166 134327 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1222 22:43:11.964841 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1222 22:43:11.977925 134327 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1222 22:43:11.977977 134327 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1222 22:43:11.981749 134327 start.go:564] Will wait 60s for crictl version
I1222 22:43:11.981790 134327 ssh_runner.go:195] Run: which crictl
I1222 22:43:11.985190 134327 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1222 22:43:12.010044 134327 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 29.1.3
RuntimeApiVersion: v1
I1222 22:43:12.010089 134327 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1222 22:43:12.033700 134327 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1222 22:43:12.058968 134327 out.go:252] * Preparing Kubernetes v1.35.0-rc.1 on Docker 29.1.3 ...
I1222 22:43:12.059036 134327 cli_runner.go:164] Run: docker network inspect functional-384766 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1222 22:43:12.075916 134327 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1222 22:43:12.079882 134327 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1222 22:43:12.090109 134327 kubeadm.go:884] updating cluster {Name:functional-384766 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-384766 Namespace:default APIServerHAVIP: APIServerName:minikubeC
A APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQem
uFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I1222 22:43:12.090209 134327 preload.go:188] Checking if preload exists for k8s version v1.35.0-rc.1 and runtime docker
I1222 22:43:12.090247 134327 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1222 22:43:12.110567 134327 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-scheduler:v1.35.0-rc.1
registry.k8s.io/kube-controller-manager:v1.35.0-rc.1
registry.k8s.io/kube-apiserver:v1.35.0-rc.1
registry.k8s.io/kube-proxy:v1.35.0-rc.1
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1222 22:43:12.110581 134327 docker.go:624] Images already preloaded, skipping extraction
I1222 22:43:12.110652 134327 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1222 22:43:12.130217 134327 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-controller-manager:v1.35.0-rc.1
registry.k8s.io/kube-apiserver:v1.35.0-rc.1
registry.k8s.io/kube-scheduler:v1.35.0-rc.1
registry.k8s.io/kube-proxy:v1.35.0-rc.1
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1222 22:43:12.130234 134327 cache_images.go:86] Images are preloaded, skipping loading
I1222 22:43:12.130243 134327 kubeadm.go:935] updating node { 192.168.49.2 8441 v1.35.0-rc.1 docker true true} ...
I1222 22:43:12.130358 134327 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0-rc.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-384766 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-384766 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1222 22:43:12.130419 134327 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1222 22:43:12.179367 134327 cni.go:84] Creating CNI manager for ""
I1222 22:43:12.179382 134327 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1222 22:43:12.179401 134327 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1222 22:43:12.179423 134327 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.35.0-rc.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-384766 NodeName:functional-384766 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPa
th:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1222 22:43:12.179554 134327 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "functional-384766"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0-rc.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1222 22:43:12.179667 134327 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0-rc.1
I1222 22:43:12.187643 134327 binaries.go:51] Found k8s binaries, skipping transfer
I1222 22:43:12.187687 134327 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1222 22:43:12.195094 134327 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (321 bytes)
I1222 22:43:12.206812 134327 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (357 bytes)
I1222 22:43:12.218731 134327 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2223 bytes)
I1222 22:43:12.230618 134327 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I1222 22:43:12.233995 134327 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1222 22:43:12.243311 134327 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1222 22:43:12.320819 134327 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1222 22:43:12.347752 134327 certs.go:69] Setting up /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766 for IP: 192.168.49.2
I1222 22:43:12.347766 134327 certs.go:195] generating shared ca certs ...
I1222 22:43:12.347785 134327 certs.go:227] acquiring lock for ca certs: {Name:mk952cc8302daab7c0050aedd5db4002f6808128 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.347952 134327 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22301-72233/.minikube/ca.key
I1222 22:43:12.347991 134327 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22301-72233/.minikube/proxy-client-ca.key
I1222 22:43:12.347998 134327 certs.go:257] generating profile certs ...
I1222 22:43:12.348050 134327 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.key
I1222 22:43:12.348060 134327 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.crt with IP's: []
I1222 22:43:12.393869 134327 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.crt ...
I1222 22:43:12.393886 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.crt: {Name:mk530e071aad18b3134693c324f9b1dfed234a51 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.394072 134327 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.key ...
I1222 22:43:12.394079 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/client.key: {Name:mk36ff40081dccdbc3e28dcc99a9f01fe02f823a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.394157 134327 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key.c9e079a8
I1222 22:43:12.394167 134327 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt.c9e079a8 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I1222 22:43:12.495430 134327 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt.c9e079a8 ...
I1222 22:43:12.495447 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt.c9e079a8: {Name:mk991fe04215d5a48dff19d83b332257e2a9b977 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.495606 134327 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key.c9e079a8 ...
I1222 22:43:12.495614 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key.c9e079a8: {Name:mk6f551b6f6ed8853a3ffaf4212c2ec6a1212fac Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.495688 134327 certs.go:382] copying /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt.c9e079a8 -> /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt
I1222 22:43:12.495781 134327 certs.go:386] copying /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key.c9e079a8 -> /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key
I1222 22:43:12.495840 134327 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.key
I1222 22:43:12.495851 134327 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.crt with IP's: []
I1222 22:43:12.599432 134327 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.crt ...
I1222 22:43:12.599449 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.crt: {Name:mka7f526831d42cc26b69e542f287f3ffd5c2994 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.599660 134327 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.key ...
I1222 22:43:12.599671 134327 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.key: {Name:mke420499a15db0f79a1ec68ba87e862a7601a77 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1222 22:43:12.599860 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/75803.pem (1338 bytes)
W1222 22:43:12.599896 134327 certs.go:480] ignoring /home/jenkins/minikube-integration/22301-72233/.minikube/certs/75803_empty.pem, impossibly tiny 0 bytes
I1222 22:43:12.599903 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca-key.pem (1675 bytes)
I1222 22:43:12.599927 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/ca.pem (1082 bytes)
I1222 22:43:12.599947 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/cert.pem (1123 bytes)
I1222 22:43:12.599966 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/certs/key.pem (1679 bytes)
I1222 22:43:12.600002 134327 certs.go:484] found cert: /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/ssl/certs/758032.pem (1708 bytes)
I1222 22:43:12.600587 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1222 22:43:12.618629 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1222 22:43:12.635948 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1222 22:43:12.652387 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1222 22:43:12.669240 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1222 22:43:12.685719 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1222 22:43:12.702239 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1222 22:43:12.718406 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/profiles/functional-384766/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1222 22:43:12.735111 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/certs/75803.pem --> /usr/share/ca-certificates/75803.pem (1338 bytes)
I1222 22:43:12.754419 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/files/etc/ssl/certs/758032.pem --> /usr/share/ca-certificates/758032.pem (1708 bytes)
I1222 22:43:12.771231 134327 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22301-72233/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1222 22:43:12.788411 134327 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I1222 22:43:12.800191 134327 ssh_runner.go:195] Run: openssl version
I1222 22:43:12.806073 134327 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/758032.pem
I1222 22:43:12.812870 134327 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/758032.pem /etc/ssl/certs/758032.pem
I1222 22:43:12.819841 134327 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/758032.pem
I1222 22:43:12.823139 134327 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 22 22:42 /usr/share/ca-certificates/758032.pem
I1222 22:43:12.823180 134327 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/758032.pem
I1222 22:43:12.856461 134327 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I1222 22:43:12.863976 134327 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/758032.pem /etc/ssl/certs/3ec20f2e.0
I1222 22:43:12.870984 134327 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I1222 22:43:12.877801 134327 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I1222 22:43:12.884620 134327 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1222 22:43:12.888032 134327 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 22 22:33 /usr/share/ca-certificates/minikubeCA.pem
I1222 22:43:12.888070 134327 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1222 22:43:12.921031 134327 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I1222 22:43:12.928344 134327 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0
I1222 22:43:12.935237 134327 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/75803.pem
I1222 22:43:12.942135 134327 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/75803.pem /etc/ssl/certs/75803.pem
I1222 22:43:12.949033 134327 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/75803.pem
I1222 22:43:12.952633 134327 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 22 22:42 /usr/share/ca-certificates/75803.pem
I1222 22:43:12.952666 134327 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/75803.pem
I1222 22:43:12.986137 134327 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I1222 22:43:12.993388 134327 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/75803.pem /etc/ssl/certs/51391683.0
I1222 22:43:13.000443 134327 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1222 22:43:13.003773 134327 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1222 22:43:13.003823 134327 kubeadm.go:401] StartCluster: {Name:functional-384766 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766394456-22288@sha256:35aded7a4a0ae59b3c3af27bf7edc655e2fc3c5eaa3d1028779c0f2939f0c484 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-384766 Namespace:default APIServerHAVIP: APIServerName:minikubeCA A
PIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFi
rmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1222 22:43:13.003935 134327 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1222 22:43:13.022246 134327 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1222 22:43:13.029925 134327 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1222 22:43:13.037260 134327 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1222 22:43:13.037300 134327 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1222 22:43:13.044335 134327 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1222 22:43:13.044343 134327 kubeadm.go:158] found existing configuration files:
I1222 22:43:13.044373 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1222 22:43:13.051253 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1222 22:43:13.051300 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1222 22:43:13.058200 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1222 22:43:13.065361 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1222 22:43:13.065396 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1222 22:43:13.072996 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1222 22:43:13.080540 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1222 22:43:13.080578 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1222 22:43:13.087954 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1222 22:43:13.095464 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1222 22:43:13.095501 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1222 22:43:13.102677 134327 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1222 22:43:13.216388 134327 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
I1222 22:43:13.216952 134327 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I1222 22:43:13.273527 134327 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1222 22:47:14.702958 134327 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
I1222 22:47:14.703001 134327 kubeadm.go:319]
I1222 22:47:14.703173 134327 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1222 22:47:14.706122 134327 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0-rc.1
I1222 22:47:14.706161 134327 kubeadm.go:319] [preflight] Running pre-flight checks
I1222 22:47:14.706254 134327 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1222 22:47:14.706333 134327 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1045-gcp
I1222 22:47:14.706366 134327 kubeadm.go:319] OS: Linux
I1222 22:47:14.706416 134327 kubeadm.go:319] CGROUPS_CPU: enabled
I1222 22:47:14.706466 134327 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1222 22:47:14.706505 134327 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1222 22:47:14.706549 134327 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1222 22:47:14.706609 134327 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1222 22:47:14.706661 134327 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1222 22:47:14.706722 134327 kubeadm.go:319] CGROUPS_PIDS: enabled
I1222 22:47:14.706770 134327 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1222 22:47:14.706806 134327 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1222 22:47:14.706878 134327 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1222 22:47:14.706958 134327 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1222 22:47:14.707046 134327 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1222 22:47:14.707099 134327 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1222 22:47:14.708921 134327 out.go:252] - Generating certificates and keys ...
I1222 22:47:14.709006 134327 kubeadm.go:319] [certs] Using existing ca certificate authority
I1222 22:47:14.709096 134327 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1222 22:47:14.709185 134327 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1222 22:47:14.709241 134327 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1222 22:47:14.709313 134327 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1222 22:47:14.709355 134327 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1222 22:47:14.709397 134327 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1222 22:47:14.709500 134327 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1222 22:47:14.709542 134327 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1222 22:47:14.709652 134327 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1222 22:47:14.709700 134327 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1222 22:47:14.709758 134327 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1222 22:47:14.709794 134327 kubeadm.go:319] [certs] Generating "sa" key and public key
I1222 22:47:14.709843 134327 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1222 22:47:14.709884 134327 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1222 22:47:14.709957 134327 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1222 22:47:14.710042 134327 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1222 22:47:14.710137 134327 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1222 22:47:14.710192 134327 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1222 22:47:14.710259 134327 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1222 22:47:14.710321 134327 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1222 22:47:14.711693 134327 out.go:252] - Booting up control plane ...
I1222 22:47:14.711763 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1222 22:47:14.711832 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1222 22:47:14.711885 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1222 22:47:14.711969 134327 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1222 22:47:14.712050 134327 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1222 22:47:14.712137 134327 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1222 22:47:14.712210 134327 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1222 22:47:14.712241 134327 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1222 22:47:14.712365 134327 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1222 22:47:14.712468 134327 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1222 22:47:14.712537 134327 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.001109614s
I1222 22:47:14.712542 134327 kubeadm.go:319]
I1222 22:47:14.712602 134327 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I1222 22:47:14.712629 134327 kubeadm.go:319] - The kubelet is not running
I1222 22:47:14.712711 134327 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I1222 22:47:14.712713 134327 kubeadm.go:319]
I1222 22:47:14.712833 134327 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I1222 22:47:14.712875 134327 kubeadm.go:319] - 'systemctl status kubelet'
I1222 22:47:14.712911 134327 kubeadm.go:319] - 'journalctl -xeu kubelet'
I1222 22:47:14.712955 134327 kubeadm.go:319]
W1222 22:47:14.713125 134327 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [functional-384766 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001109614s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
I1222 22:47:14.713237 134327 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I1222 22:47:15.126260 134327 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1222 22:47:15.138517 134327 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1222 22:47:15.138583 134327 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1222 22:47:15.146236 134327 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1222 22:47:15.146247 134327 kubeadm.go:158] found existing configuration files:
I1222 22:47:15.146286 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1222 22:47:15.153519 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1222 22:47:15.153558 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1222 22:47:15.160666 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1222 22:47:15.167756 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1222 22:47:15.167801 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1222 22:47:15.174775 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1222 22:47:15.181677 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1222 22:47:15.181718 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1222 22:47:15.188520 134327 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1222 22:47:15.195495 134327 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1222 22:47:15.195527 134327 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1222 22:47:15.202385 134327 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1222 22:47:15.304277 134327 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
I1222 22:47:15.304802 134327 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I1222 22:47:15.360296 134327 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1222 22:51:15.873250 134327 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
I1222 22:51:15.873301 134327 kubeadm.go:319]
I1222 22:51:15.873379 134327 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1222 22:51:15.876563 134327 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0-rc.1
I1222 22:51:15.876648 134327 kubeadm.go:319] [preflight] Running pre-flight checks
I1222 22:51:15.876741 134327 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1222 22:51:15.876789 134327 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1045-gcp
I1222 22:51:15.876817 134327 kubeadm.go:319] OS: Linux
I1222 22:51:15.876860 134327 kubeadm.go:319] CGROUPS_CPU: enabled
I1222 22:51:15.876898 134327 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1222 22:51:15.876939 134327 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1222 22:51:15.876980 134327 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1222 22:51:15.877051 134327 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1222 22:51:15.877112 134327 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1222 22:51:15.877162 134327 kubeadm.go:319] CGROUPS_PIDS: enabled
I1222 22:51:15.877207 134327 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1222 22:51:15.877279 134327 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1222 22:51:15.877385 134327 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1222 22:51:15.877523 134327 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1222 22:51:15.877645 134327 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1222 22:51:15.877709 134327 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1222 22:51:15.879470 134327 out.go:252] - Generating certificates and keys ...
I1222 22:51:15.879536 134327 kubeadm.go:319] [certs] Using existing ca certificate authority
I1222 22:51:15.879616 134327 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1222 22:51:15.879730 134327 kubeadm.go:319] [certs] Using existing apiserver-kubelet-client certificate and key on disk
I1222 22:51:15.879815 134327 kubeadm.go:319] [certs] Using existing front-proxy-ca certificate authority
I1222 22:51:15.879907 134327 kubeadm.go:319] [certs] Using existing front-proxy-client certificate and key on disk
I1222 22:51:15.879981 134327 kubeadm.go:319] [certs] Using existing etcd/ca certificate authority
I1222 22:51:15.880071 134327 kubeadm.go:319] [certs] Using existing etcd/server certificate and key on disk
I1222 22:51:15.880156 134327 kubeadm.go:319] [certs] Using existing etcd/peer certificate and key on disk
I1222 22:51:15.880222 134327 kubeadm.go:319] [certs] Using existing etcd/healthcheck-client certificate and key on disk
I1222 22:51:15.880287 134327 kubeadm.go:319] [certs] Using existing apiserver-etcd-client certificate and key on disk
I1222 22:51:15.880316 134327 kubeadm.go:319] [certs] Using the existing "sa" key
I1222 22:51:15.880365 134327 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1222 22:51:15.880409 134327 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1222 22:51:15.880454 134327 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1222 22:51:15.880500 134327 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1222 22:51:15.880550 134327 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1222 22:51:15.880645 134327 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1222 22:51:15.880761 134327 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1222 22:51:15.880828 134327 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1222 22:51:15.882035 134327 out.go:252] - Booting up control plane ...
I1222 22:51:15.882102 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1222 22:51:15.882164 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1222 22:51:15.882233 134327 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1222 22:51:15.882343 134327 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1222 22:51:15.882437 134327 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1222 22:51:15.882520 134327 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1222 22:51:15.882583 134327 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1222 22:51:15.882649 134327 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1222 22:51:15.882790 134327 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1222 22:51:15.882922 134327 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1222 22:51:15.882978 134327 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.000802765s
I1222 22:51:15.882981 134327 kubeadm.go:319]
I1222 22:51:15.883028 134327 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I1222 22:51:15.883054 134327 kubeadm.go:319] - The kubelet is not running
I1222 22:51:15.883146 134327 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I1222 22:51:15.883150 134327 kubeadm.go:319]
I1222 22:51:15.883240 134327 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I1222 22:51:15.883272 134327 kubeadm.go:319] - 'systemctl status kubelet'
I1222 22:51:15.883297 134327 kubeadm.go:319] - 'journalctl -xeu kubelet'
I1222 22:51:15.883327 134327 kubeadm.go:319]
I1222 22:51:15.883374 134327 kubeadm.go:403] duration metric: took 8m2.879554887s to StartCluster
I1222 22:51:15.883454 134327 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1222 22:51:15.883515 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I1222 22:51:15.919830 134327 cri.go:96] found id: ""
I1222 22:51:15.919859 134327 logs.go:282] 0 containers: []
W1222 22:51:15.919870 134327 logs.go:284] No container was found matching "kube-apiserver"
I1222 22:51:15.919878 134327 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1222 22:51:15.919930 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I1222 22:51:15.944698 134327 cri.go:96] found id: ""
I1222 22:51:15.944716 134327 logs.go:282] 0 containers: []
W1222 22:51:15.944725 134327 logs.go:284] No container was found matching "etcd"
I1222 22:51:15.944760 134327 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1222 22:51:15.944815 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I1222 22:51:15.969109 134327 cri.go:96] found id: ""
I1222 22:51:15.969124 134327 logs.go:282] 0 containers: []
W1222 22:51:15.969131 134327 logs.go:284] No container was found matching "coredns"
I1222 22:51:15.969136 134327 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1222 22:51:15.969180 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I1222 22:51:15.993741 134327 cri.go:96] found id: ""
I1222 22:51:15.993756 134327 logs.go:282] 0 containers: []
W1222 22:51:15.993762 134327 logs.go:284] No container was found matching "kube-scheduler"
I1222 22:51:15.993770 134327 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1222 22:51:15.993825 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I1222 22:51:16.022352 134327 cri.go:96] found id: ""
I1222 22:51:16.022377 134327 logs.go:282] 0 containers: []
W1222 22:51:16.022387 134327 logs.go:284] No container was found matching "kube-proxy"
I1222 22:51:16.022394 134327 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1222 22:51:16.022453 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I1222 22:51:16.051944 134327 cri.go:96] found id: ""
I1222 22:51:16.051963 134327 logs.go:282] 0 containers: []
W1222 22:51:16.051973 134327 logs.go:284] No container was found matching "kube-controller-manager"
I1222 22:51:16.051980 134327 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1222 22:51:16.052030 134327 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I1222 22:51:16.079795 134327 cri.go:96] found id: ""
I1222 22:51:16.079810 134327 logs.go:282] 0 containers: []
W1222 22:51:16.079817 134327 logs.go:284] No container was found matching "kindnet"
I1222 22:51:16.079834 134327 logs.go:123] Gathering logs for kubelet ...
I1222 22:51:16.079844 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1222 22:51:16.127554 134327 logs.go:123] Gathering logs for dmesg ...
I1222 22:51:16.127582 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1222 22:51:16.143319 134327 logs.go:123] Gathering logs for describe nodes ...
I1222 22:51:16.143338 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1222 22:51:16.199784 134327 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E1222 22:51:16.192528 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.193128 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.194732 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.195115 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.196676 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
The connection to the server localhost:8441 was refused - did you specify the right host or port?
output:
** stderr **
E1222 22:51:16.192528 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.193128 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.194732 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.195115 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:16.196676 9298 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
The connection to the server localhost:8441 was refused - did you specify the right host or port?
** /stderr **
I1222 22:51:16.199815 134327 logs.go:123] Gathering logs for Docker ...
I1222 22:51:16.199829 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1222 22:51:16.221641 134327 logs.go:123] Gathering logs for container status ...
I1222 22:51:16.221662 134327 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1222 22:51:16.249910 134327 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000802765s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W1222 22:51:16.249952 134327 out.go:285] *
W1222 22:51:16.250032 134327 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000802765s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W1222 22:51:16.250052 134327 out.go:285] *
W1222 22:51:16.250321 134327 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I1222 22:51:16.253724 134327 out.go:203]
W1222 22:51:16.254723 134327 out.go:285] X Exiting due to K8S_KUBELET_NOT_RUNNING: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0-rc.1
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 6.8.0-1045-gcp
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000802765s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1045-gcp\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W1222 22:51:16.254765 134327 out.go:285] * Suggestion: Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start
W1222 22:51:16.254780 134327 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/4172
I1222 22:51:16.256500 134327 out.go:203]
==> Docker <==
Dec 22 22:43:10 functional-384766 dockerd[1192]: time="2025-12-22T22:43:10.968454608Z" level=info msg="Restoring containers: start."
Dec 22 22:43:10 functional-384766 dockerd[1192]: time="2025-12-22T22:43:10.982234726Z" level=info msg="Deleting nftables IPv4 rules" error="exit status 1"
Dec 22 22:43:10 functional-384766 dockerd[1192]: time="2025-12-22T22:43:10.993188038Z" level=info msg="Deleting nftables IPv6 rules" error="exit status 1"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.453024800Z" level=info msg="Loading containers: done."
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.463301670Z" level=warning msg="WARNING: Support for cgroup v1 is deprecated and planned to be removed by no later than May 2029 (https://github.com/moby/moby/issues/51111)"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.463344433Z" level=info msg="Docker daemon" commit=fbf3ed2 containerd-snapshotter=false storage-driver=overlay2 version=29.1.3
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.463376191Z" level=info msg="Initializing buildkit"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.480778445Z" level=info msg="Completed buildkit initialization"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.484945277Z" level=info msg="Daemon has completed initialization"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.485013905Z" level=info msg="API listen on /var/run/docker.sock"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.485049657Z" level=info msg="API listen on /run/docker.sock"
Dec 22 22:43:11 functional-384766 dockerd[1192]: time="2025-12-22T22:43:11.485052110Z" level=info msg="API listen on [::]:2376"
Dec 22 22:43:11 functional-384766 systemd[1]: Started docker.service - Docker Application Container Engine.
Dec 22 22:43:11 functional-384766 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine...
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Start docker client with request timeout 0s"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Hairpin mode is set to hairpin-veth"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Loaded network plugin cni"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Docker cri networking managed by network plugin cni"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Setting cgroupDriver cgroupfs"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Dec 22 22:43:11 functional-384766 cri-dockerd[1482]: time="2025-12-22T22:43:11Z" level=info msg="Start cri-dockerd grpc backend"
Dec 22 22:43:11 functional-384766 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine.
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
==> describe nodes <==
command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E1222 22:51:17.095733 9455 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:17.096227 9455 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:17.097809 9455 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:17.098172 9455 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
E1222 22:51:17.099645 9455 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8441/api?timeout=32s\": dial tcp [::1]:8441: connect: connection refused"
The connection to the server localhost:8441 was refused - did you specify the right host or port?
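Note (not part of the kubectl output above): the repeated "connection refused" on localhost:8441 only says that nothing is listening on the apiserver port inside the node, which matches the empty container-status table earlier; the underlying cause shows up in the kubelet section below. One way to confirm from the test host, assuming the profile container is still up (illustrative command, not output from this run):

    out/minikube-linux-amd64 -p functional-384766 ssh -- sudo docker ps -a --filter name=kube-apiserver
    # an empty listing means kubeadm/kubelet never created the control-plane containers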
==> dmesg <==
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff da 9e 7f a3 27 cb 08 06
[ +0.239045] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 6e eb f7 fd 0a 48 08 06
[ +0.170967] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 16 5a dc 65 fc cc 08 06
[Dec22 22:37] IPv4: martian source 10.244.0.1 from 10.244.0.32, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 66 cb ee 90 55 2b 08 06
[ +0.000450] IPv4: martian source 10.244.0.32 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff be 43 50 0c dd 15 08 06
[ +0.000658] IPv4: martian source 10.244.0.32 from 10.244.0.7, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 4e 41 3c 76 23 2b 08 06
[ +1.709294] IPv4: martian source 10.244.0.31 from 10.244.0.26, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff be b6 30 85 5f 4e 08 06
[ +0.532867] IPv4: martian source 10.244.0.26 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff be 43 50 0c dd 15 08 06
[Dec22 22:39] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 46 b7 49 09 f9 e0 08 06
[ +0.006417] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 1e e5 c5 4f 67 2b 08 06
[Dec22 22:40] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 22 2e 10 70 70 25 08 06
[Dec22 22:41] IPv4: martian source 10.244.0.1 from 10.244.0.6, on dev eth0
[ +0.000034] ll header: 00000000: ff ff ff ff ff ff ee d7 ae 32 ba c5 08 06
[Dec22 22:42] IPv4: martian source 10.244.0.1 from 10.244.0.15, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 82 95 cb 2f 8e 91 08 06
==> kernel <==
22:51:17 up 2:33, 0 user, load average: 0.01, 0.23, 0.71
Linux functional-384766 6.8.0-1045-gcp #48~22.04.1-Ubuntu SMP Tue Nov 25 13:07:56 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kubelet <==
Dec 22 22:51:13 functional-384766 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Dec 22 22:51:14 functional-384766 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 318.
Dec 22 22:51:14 functional-384766 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:14 functional-384766 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:14 functional-384766 kubelet[9167]: E1222 22:51:14.535514 9167 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Dec 22 22:51:14 functional-384766 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Dec 22 22:51:14 functional-384766 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Dec 22 22:51:15 functional-384766 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 319.
Dec 22 22:51:15 functional-384766 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:15 functional-384766 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:15 functional-384766 kubelet[9179]: E1222 22:51:15.286791 9179 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Dec 22 22:51:15 functional-384766 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Dec 22 22:51:15 functional-384766 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Dec 22 22:51:15 functional-384766 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 320.
Dec 22 22:51:15 functional-384766 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:15 functional-384766 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:16 functional-384766 kubelet[9243]: E1222 22:51:16.039425 9243 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Dec 22 22:51:16 functional-384766 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Dec 22 22:51:16 functional-384766 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Dec 22 22:51:16 functional-384766 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 321.
Dec 22 22:51:16 functional-384766 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:16 functional-384766 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Dec 22 22:51:16 functional-384766 kubelet[9340]: E1222 22:51:16.785283 9340 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Dec 22 22:51:16 functional-384766 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Dec 22 22:51:16 functional-384766 systemd[1]: kubelet.service: Failed with result 'exit-code'.
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-384766 -n functional-384766
helpers_test.go:263: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-384766 -n functional-384766: exit status 6 (313.513443ms)
-- stdout --
Stopped
WARNING: Your kubectl is pointing to stale minikube-vm.
To fix the kubectl context, run `minikube update-context`
-- /stdout --
** stderr **
E1222 22:51:17.496915 146607 status.go:458] kubeconfig endpoint: get endpoint: "functional-384766" does not appear in /home/jenkins/minikube-integration/22301-72233/kubeconfig
** /stderr **
helpers_test.go:263: status error: exit status 6 (may be ok)
helpers_test.go:265: "functional-384766" apiserver is not running, skipping kubectl commands (state="Stopped")
--- FAIL: TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/StartWithProxy (497.72s)
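Note on the failure (not part of the test output): every kubelet restart in the journal above exits with "kubelet is configured to not run on a host using cgroup v1", so the kubelet for v1.35.0-rc.1 never starts, the control-plane containers are never created, and StartWithProxy times out after ~497s. The node container reuses the host's cgroup setup, so the likely fix is on the Jenkins host rather than in the test itself. A minimal sketch of how one might check the host's cgroup mode and, if needed, move an Ubuntu host to the unified cgroup v2 hierarchy (standard stat/GRUB/systemd options, not commands taken from this run):

    # "cgroup2fs" means the host is already on cgroup v2; "tmpfs" means cgroup v1
    stat -fc %T /sys/fs/cgroup/

    # switch a GRUB-based host to the unified (v2) hierarchy, then reboot
    sudo sed -i 's/^GRUB_CMDLINE_LINUX="/GRUB_CMDLINE_LINUX="systemd.unified_cgroup_hierarchy=1 /' /etc/default/grub
    sudo update-grub && sudo reboot

KubeletConfiguration also carries a failCgroupV1 field (added around v1.31) that can opt back into cgroup v1, but whether minikube exposes that for this release candidate is an assumption, not something this log shows.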