=== RUN TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd
=== CONT TestFunctional/parallel/DashboardCmd
functional_test.go:901: (dbg) daemon: [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-857474 --alsologtostderr -v=1]
functional_test.go:914: output didn't produce a URL
functional_test.go:906: (dbg) stopping [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-857474 --alsologtostderr -v=1] ...
functional_test.go:906: (dbg) [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-857474 --alsologtostderr -v=1] stdout:
functional_test.go:906: (dbg) [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-857474 --alsologtostderr -v=1] stderr:
I0226 10:32:20.410379 17618 out.go:291] Setting OutFile to fd 1 ...
I0226 10:32:20.410756 17618 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0226 10:32:20.410772 17618 out.go:304] Setting ErrFile to fd 2...
I0226 10:32:20.410777 17618 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0226 10:32:20.410987 17618 root.go:338] Updating PATH: /home/jenkins/minikube-integration/18222-4657/.minikube/bin
I0226 10:32:20.411341 17618 mustload.go:65] Loading cluster: functional-857474
I0226 10:32:20.411685 17618 config.go:182] Loaded profile config "functional-857474": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I0226 10:32:20.412086 17618 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.412141 17618 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.427529 17618 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:39233
I0226 10:32:20.428017 17618 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.428652 17618 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.428683 17618 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.429002 17618 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.429186 17618 main.go:141] libmachine: (functional-857474) Calling .GetState
I0226 10:32:20.430776 17618 host.go:66] Checking if "functional-857474" exists ...
I0226 10:32:20.431107 17618 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.431167 17618 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.447277 17618 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:33263
I0226 10:32:20.447712 17618 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.448247 17618 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.448272 17618 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.448670 17618 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.448886 17618 main.go:141] libmachine: (functional-857474) Calling .DriverName
I0226 10:32:20.449068 17618 api_server.go:166] Checking apiserver status ...
I0226 10:32:20.449127 17618 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0226 10:32:20.449154 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHHostname
I0226 10:32:20.452591 17618 main.go:141] libmachine: (functional-857474) DBG | domain functional-857474 has defined MAC address 52:54:00:f1:08:be in network mk-functional-857474
I0226 10:32:20.452993 17618 main.go:141] libmachine: (functional-857474) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:f1:08:be", ip: ""} in network mk-functional-857474: {Iface:virbr1 ExpiryTime:2024-02-26 11:30:04 +0000 UTC Type:0 Mac:52:54:00:f1:08:be Iaid: IPaddr:192.168.39.23 Prefix:24 Hostname:functional-857474 Clientid:01:52:54:00:f1:08:be}
I0226 10:32:20.453029 17618 main.go:141] libmachine: (functional-857474) DBG | domain functional-857474 has defined IP address 192.168.39.23 and MAC address 52:54:00:f1:08:be in network mk-functional-857474
I0226 10:32:20.453123 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHPort
I0226 10:32:20.453314 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHKeyPath
I0226 10:32:20.453488 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHUsername
I0226 10:32:20.453645 17618 sshutil.go:53] new ssh client: &{IP:192.168.39.23 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/18222-4657/.minikube/machines/functional-857474/id_rsa Username:docker}
I0226 10:32:20.570444 17618 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/7435/cgroup
I0226 10:32:20.581745 17618 api_server.go:182] apiserver freezer: "7:freezer:/kubepods/burstable/pod201e66c8b882097b716ff943f0d10b63/3ba85e169876f6f8d452b0ffaf5f09c265c001a4ad214640db293e9278755c71"
I0226 10:32:20.581811 17618 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/kubepods/burstable/pod201e66c8b882097b716ff943f0d10b63/3ba85e169876f6f8d452b0ffaf5f09c265c001a4ad214640db293e9278755c71/freezer.state
I0226 10:32:20.593099 17618 api_server.go:204] freezer state: "THAWED"
I0226 10:32:20.593127 17618 api_server.go:253] Checking apiserver healthz at https://192.168.39.23:8441/healthz ...
I0226 10:32:20.598325 17618 api_server.go:279] https://192.168.39.23:8441/healthz returned 200:
ok
W0226 10:32:20.598365 17618 out.go:239] * Enabling dashboard ...
* Enabling dashboard ...
I0226 10:32:20.598507 17618 config.go:182] Loaded profile config "functional-857474": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I0226 10:32:20.598527 17618 addons.go:69] Setting dashboard=true in profile "functional-857474"
I0226 10:32:20.598533 17618 addons.go:234] Setting addon dashboard=true in "functional-857474"
I0226 10:32:20.598554 17618 host.go:66] Checking if "functional-857474" exists ...
I0226 10:32:20.598784 17618 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.598822 17618 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.614828 17618 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:37947
I0226 10:32:20.615336 17618 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.615835 17618 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.615854 17618 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.616155 17618 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.616657 17618 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.616697 17618 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.634004 17618 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:42591
I0226 10:32:20.634488 17618 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.635020 17618 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.635046 17618 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.635402 17618 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.635587 17618 main.go:141] libmachine: (functional-857474) Calling .GetState
I0226 10:32:20.637335 17618 main.go:141] libmachine: (functional-857474) Calling .DriverName
I0226 10:32:20.641819 17618 out.go:177] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I0226 10:32:20.643939 17618 out.go:177] - Using image docker.io/kubernetesui/metrics-scraper:v1.0.8
I0226 10:32:20.645902 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-ns.yaml
I0226 10:32:20.645921 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I0226 10:32:20.645939 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHHostname
I0226 10:32:20.649132 17618 main.go:141] libmachine: (functional-857474) DBG | domain functional-857474 has defined MAC address 52:54:00:f1:08:be in network mk-functional-857474
I0226 10:32:20.649678 17618 main.go:141] libmachine: (functional-857474) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:f1:08:be", ip: ""} in network mk-functional-857474: {Iface:virbr1 ExpiryTime:2024-02-26 11:30:04 +0000 UTC Type:0 Mac:52:54:00:f1:08:be Iaid: IPaddr:192.168.39.23 Prefix:24 Hostname:functional-857474 Clientid:01:52:54:00:f1:08:be}
I0226 10:32:20.649706 17618 main.go:141] libmachine: (functional-857474) DBG | domain functional-857474 has defined IP address 192.168.39.23 and MAC address 52:54:00:f1:08:be in network mk-functional-857474
I0226 10:32:20.649856 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHPort
I0226 10:32:20.650049 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHKeyPath
I0226 10:32:20.650201 17618 main.go:141] libmachine: (functional-857474) Calling .GetSSHUsername
I0226 10:32:20.650344 17618 sshutil.go:53] new ssh client: &{IP:192.168.39.23 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/18222-4657/.minikube/machines/functional-857474/id_rsa Username:docker}
I0226 10:32:20.775989 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I0226 10:32:20.776015 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I0226 10:32:20.817049 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I0226 10:32:20.817071 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I0226 10:32:20.847997 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I0226 10:32:20.848024 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I0226 10:32:20.870222 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-dp.yaml
I0226 10:32:20.870250 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4288 bytes)
I0226 10:32:20.911541 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-role.yaml
I0226 10:32:20.911565 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I0226 10:32:20.955705 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I0226 10:32:20.955734 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I0226 10:32:20.998351 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-sa.yaml
I0226 10:32:20.998374 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I0226 10:32:21.032326 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-secret.yaml
I0226 10:32:21.032351 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I0226 10:32:21.068362 17618 addons.go:426] installing /etc/kubernetes/addons/dashboard-svc.yaml
I0226 10:32:21.068394 17618 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I0226 10:32:21.098955 17618 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0226 10:32:23.343280 17618 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (2.244275753s)
I0226 10:32:23.343352 17618 main.go:141] libmachine: Making call to close driver server
I0226 10:32:23.343367 17618 main.go:141] libmachine: (functional-857474) Calling .Close
I0226 10:32:23.343622 17618 main.go:141] libmachine: Successfully made call to close driver server
I0226 10:32:23.343648 17618 main.go:141] libmachine: Making call to close connection to plugin binary
I0226 10:32:23.343658 17618 main.go:141] libmachine: Making call to close driver server
I0226 10:32:23.343669 17618 main.go:141] libmachine: (functional-857474) Calling .Close
I0226 10:32:23.343913 17618 main.go:141] libmachine: Successfully made call to close driver server
I0226 10:32:23.343944 17618 main.go:141] libmachine: Making call to close connection to plugin binary
I0226 10:32:23.345434 17618 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p functional-857474 addons enable metrics-server
I0226 10:32:23.346580 17618 addons.go:197] Writing out "functional-857474" config to set dashboard=true...
W0226 10:32:23.346836 17618 out.go:239] * Verifying dashboard health ...
* Verifying dashboard health ...
I0226 10:32:23.347811 17618 kapi.go:59] client config for functional-857474: &rest.Config{Host:"https://192.168.39.23:8441", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/18222-4657/.minikube/profiles/functional-857474/client.crt", KeyFile:"/home/jenkins/minikube-integration/18222-4657/.minikube/profiles/functional-857474/client.key", CAFile:"/home/jenkins/minikube-integration/18222-4657/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x1c5ab80), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0226 10:32:23.359904 17618 service.go:214] Found service: &Service{ObjectMeta:{kubernetes-dashboard kubernetes-dashboard 2d9e9bb0-c71e-4964-b104-31a853a75610 710 0 2024-02-26 10:32:23 +0000 UTC <nil> <nil> map[addonmanager.kubernetes.io/mode:Reconcile k8s-app:kubernetes-dashboard kubernetes.io/minikube-addons:dashboard] map[kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile","k8s-app":"kubernetes-dashboard","kubernetes.io/minikube-addons":"dashboard"},"name":"kubernetes-dashboard","namespace":"kubernetes-dashboard"},"spec":{"ports":[{"port":80,"targetPort":9090}],"selector":{"k8s-app":"kubernetes-dashboard"}}}
] [] [] [{kubectl-client-side-apply Update v1 2024-02-26 10:32:23 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{}},"f:labels":{".":{},"f:addonmanager.kubernetes.io/mode":{},"f:k8s-app":{},"f:kubernetes.io/minikube-addons":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}} }]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:,Protocol:TCP,Port:80,TargetPort:{0 9090 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{k8s-app: kubernetes-dashboard,},ClusterIP:10.105.225.130,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.105.225.130],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}
W0226 10:32:23.360086 17618 out.go:239] * Launching proxy ...
* Launching proxy ...
I0226 10:32:23.360157 17618 dashboard.go:152] Executing: /usr/local/bin/kubectl [/usr/local/bin/kubectl --context functional-857474 proxy --port 36195]
I0226 10:32:23.360556 17618 dashboard.go:157] Waiting for kubectl to output host:port ...
I0226 10:32:23.446680 17618 out.go:177]
W0226 10:32:23.448245 17618 out.go:239] X Exiting due to HOST_KUBECTL_PROXY: kubectl proxy: readByteWithTimeout: EOF
X Exiting due to HOST_KUBECTL_PROXY: kubectl proxy: readByteWithTimeout: EOF
W0226 10:32:23.448264 17618 out.go:239] *
*
W0226 10:32:23.450071 17618 out.go:239] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ * Please also attach the following file to the GitHub issue: │
│ * - /tmp/minikube_dashboard_2f9e80c8c4dc47927ad6915561a20c5705c3b3b4_0.log │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ * Please also attach the following file to the GitHub issue: │
│ * - /tmp/minikube_dashboard_2f9e80c8c4dc47927ad6915561a20c5705c3b3b4_0.log │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0226 10:32:23.451664 17618 out.go:177]
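The failure above happens at the proxy step: minikube launched `kubectl --context functional-857474 proxy --port 36195` and waited for it to print its listen address, but the read hit EOF, i.e. kubectl proxy exited before ever announcing a host:port, which surfaces as `HOST_KUBECTL_PROXY: kubectl proxy: readByteWithTimeout: EOF`. As a rough illustration of that wait-for-output pattern (a minimal sketch only, not minikube's actual dashboard.go; the function name, timeout, and error wording are invented for the example):

package main

// Sketch (assumption: not minikube's real code) of "launch kubectl proxy and
// wait for its first output line", the step that failed above with EOF.

import (
	"bufio"
	"fmt"
	"os/exec"
	"time"
)

// proxyFirstLine starts `kubectl proxy` and returns the first line it prints,
// normally "Starting to serve on 127.0.0.1:<port>". EOF before that line means
// the proxy exited without ever serving, which is the failure seen in this log.
func proxyFirstLine(kubeContext string, port int, timeout time.Duration) (string, error) {
	cmd := exec.Command("kubectl", "--context", kubeContext, "proxy", fmt.Sprintf("--port=%d", port))
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return "", err
	}
	if err := cmd.Start(); err != nil {
		return "", err
	}

	type result struct {
		line string
		err  error
	}
	ch := make(chan result, 1)
	go func() {
		line, err := bufio.NewReader(stdout).ReadString('\n')
		ch <- result{line, err}
	}()

	select {
	case r := <-ch:
		if r.err != nil {
			// io.EOF here corresponds to "kubectl proxy: readByteWithTimeout: EOF".
			return "", fmt.Errorf("kubectl proxy: %w", r.err)
		}
		return r.line, nil
	case <-time.After(timeout):
		cmd.Process.Kill()
		return "", fmt.Errorf("timed out after %v waiting for kubectl proxy output", timeout)
	}
}

func main() {
	line, err := proxyFirstLine("functional-857474", 36195, 10*time.Second)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Print(line)
}

With that pattern in mind, the open question for the post-mortem logs below is why kubectl proxy on the host exited immediately rather than serving on port 36195.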
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-857474 -n functional-857474
helpers_test.go:244: <<< TestFunctional/parallel/DashboardCmd FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestFunctional/parallel/DashboardCmd]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p functional-857474 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p functional-857474 logs -n 25: (3.302349324s)
helpers_test.go:252: TestFunctional/parallel/DashboardCmd logs:
-- stdout --
==> Audit <==
|-----------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|-----------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
| start | -p functional-857474 | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:31 UTC | 26 Feb 24 10:32 UTC |
| | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision | | | | | |
| | --wait=all | | | | | |
| service | invalid-svc -p | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | functional-857474 | | | | | |
| config | functional-857474 config unset | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | cpus | | | | | |
| config | functional-857474 config get | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | cpus | | | | | |
| config | functional-857474 config set | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | cpus 2 | | | | | |
| config | functional-857474 config get | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | cpus | | | | | |
| ssh | functional-857474 ssh -n | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | functional-857474 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| config | functional-857474 config unset | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | cpus | | | | | |
| config | functional-857474 config get | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | cpus | | | | | |
| start | -p functional-857474 | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | --dry-run --memory | | | | | |
| | 250MB --alsologtostderr | | | | | |
| | --driver=kvm2 | | | | | |
| cp | functional-857474 cp | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | functional-857474:/home/docker/cp-test.txt | | | | | |
| | /tmp/TestFunctionalparallelCpCmd3909854723/001/cp-test.txt | | | | | |
| start | -p functional-857474 | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | --dry-run --memory | | | | | |
| | 250MB --alsologtostderr | | | | | |
| | --driver=kvm2 | | | | | |
| ssh | functional-857474 ssh -n | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | functional-857474 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| start | -p functional-857474 --dry-run | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | --alsologtostderr -v=1 | | | | | |
| | --driver=kvm2 | | | | | |
| dashboard | --url --port 36195 | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | -p functional-857474 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| cp | functional-857474 cp | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | testdata/cp-test.txt | | | | | |
| | /tmp/does/not/exist/cp-test.txt | | | | | |
| license | | minikube | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| ssh | functional-857474 ssh -n | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | functional-857474 sudo cat | | | | | |
| | /tmp/does/not/exist/cp-test.txt | | | | | |
| ssh | functional-857474 ssh sudo | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | systemctl is-active crio | | | | | |
| ssh | functional-857474 ssh findmnt | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | -T /mount-9p | grep 9p | | | | | |
| mount | -p functional-857474 | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | /tmp/TestFunctionalparallelMountCmdany-port590098306/001:/mount-9p | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| image | functional-857474 image load --daemon | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | |
| | gcr.io/google-containers/addon-resizer:functional-857474 | | | | | |
| | --alsologtostderr | | | | | |
| ssh | functional-857474 ssh findmnt | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | -T /mount-9p | grep 9p | | | | | |
| ssh | functional-857474 ssh -- ls | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | -la /mount-9p | | | | | |
| ssh | functional-857474 ssh cat | functional-857474 | jenkins | v1.32.0 | 26 Feb 24 10:32 UTC | 26 Feb 24 10:32 UTC |
| | /mount-9p/test-1708943541872401773 | | | | | |
|-----------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/02/26 10:32:20
Running on machine: ubuntu-20-agent-7
Binary: Built with gc go1.22.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0226 10:32:20.255245 17552 out.go:291] Setting OutFile to fd 1 ...
I0226 10:32:20.257554 17552 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0226 10:32:20.257569 17552 out.go:304] Setting ErrFile to fd 2...
I0226 10:32:20.257574 17552 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0226 10:32:20.257867 17552 root.go:338] Updating PATH: /home/jenkins/minikube-integration/18222-4657/.minikube/bin
I0226 10:32:20.258642 17552 out.go:298] Setting JSON to false
I0226 10:32:20.260088 17552 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":887,"bootTime":1708942653,"procs":210,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1052-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0226 10:32:20.260167 17552 start.go:139] virtualization: kvm guest
I0226 10:32:20.262519 17552 out.go:177] * [functional-857474] minikube v1.32.0 on Ubuntu 20.04 (kvm/amd64)
I0226 10:32:20.264736 17552 out.go:177] - MINIKUBE_LOCATION=18222
I0226 10:32:20.264769 17552 notify.go:220] Checking for updates...
I0226 10:32:20.267692 17552 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0226 10:32:20.268988 17552 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/18222-4657/kubeconfig
I0226 10:32:20.270274 17552 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/18222-4657/.minikube
I0226 10:32:20.271590 17552 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0226 10:32:20.272863 17552 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0226 10:32:20.274580 17552 config.go:182] Loaded profile config "functional-857474": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.28.4
I0226 10:32:20.274996 17552 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.275077 17552 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.290071 17552 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:43505
I0226 10:32:20.290448 17552 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.291025 17552 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.291045 17552 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.291424 17552 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.291607 17552 main.go:141] libmachine: (functional-857474) Calling .DriverName
I0226 10:32:20.291948 17552 driver.go:392] Setting default libvirt URI to qemu:///system
I0226 10:32:20.292264 17552 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0226 10:32:20.292302 17552 main.go:141] libmachine: Launching plugin server for driver kvm2
I0226 10:32:20.307346 17552 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:42521
I0226 10:32:20.307872 17552 main.go:141] libmachine: () Calling .GetVersion
I0226 10:32:20.308332 17552 main.go:141] libmachine: Using API Version 1
I0226 10:32:20.308356 17552 main.go:141] libmachine: () Calling .SetConfigRaw
I0226 10:32:20.308873 17552 main.go:141] libmachine: () Calling .GetMachineName
I0226 10:32:20.309061 17552 main.go:141] libmachine: (functional-857474) Calling .DriverName
I0226 10:32:20.343820 17552 out.go:177] * Using the kvm2 driver based on existing profile
I0226 10:32:20.345301 17552 start.go:299] selected driver: kvm2
I0226 10:32:20.345320 17552 start.go:903] validating driver "kvm2" against &{Name:functional-857474 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/17936/minikube-v1.32.1-1708020063-17936-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1708008208-17936@sha256:4ea1136332ba1476cda33a97bf12e2f96995cc120674fbafd3ade22d1118ecdf Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-857474 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.39.23 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0226 10:32:20.345432 17552 start.go:914] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0226 10:32:20.346384 17552 cni.go:84] Creating CNI manager for ""
I0226 10:32:20.346406 17552 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0226 10:32:20.346413 17552 start_flags.go:323] config:
{Name:functional-857474 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/17936/minikube-v1.32.1-1708020063-17936-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1708008208-17936@sha256:4ea1136332ba1476cda33a97bf12e2f96995cc120674fbafd3ade22d1118ecdf Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:functional-857474 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:192.168.39.23 Port:8441 KubernetesVersion:v1.28.4 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0226 10:32:20.349244 17552 out.go:177] * dry-run validation complete!
==> Docker <==
-- Journal begins at Mon 2024-02-26 10:30:00 UTC, ends at Mon 2024-02-26 10:32:24 UTC. --
Feb 26 10:32:15 functional-857474 dockerd[6477]: time="2024-02-26T10:32:15.436280909Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:15 functional-857474 cri-dockerd[6700]: time="2024-02-26T10:32:15Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/917c62a88af640303f9beb2a09fb7e397032f9cfd318017b21b3dceb1a2f70f8/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Feb 26 10:32:16 functional-857474 dockerd[6470]: time="2024-02-26T10:32:16.770472480Z" level=error msg="Not continuing with pull after error: errors:\ndenied: requested access to the resource is denied\nunauthorized: authentication required\n"
Feb 26 10:32:16 functional-857474 dockerd[6470]: time="2024-02-26T10:32:16.770541696Z" level=info msg="Ignoring extra error returned from registry" error="unauthorized: authentication required"
Feb 26 10:32:18 functional-857474 dockerd[6470]: time="2024-02-26T10:32:18.505509466Z" level=info msg="ignoring event" container=917c62a88af640303f9beb2a09fb7e397032f9cfd318017b21b3dceb1a2f70f8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 26 10:32:18 functional-857474 dockerd[6477]: time="2024-02-26T10:32:18.506952185Z" level=info msg="shim disconnected" id=917c62a88af640303f9beb2a09fb7e397032f9cfd318017b21b3dceb1a2f70f8 namespace=moby
Feb 26 10:32:18 functional-857474 dockerd[6477]: time="2024-02-26T10:32:18.507906660Z" level=warning msg="cleaning up after shim disconnected" id=917c62a88af640303f9beb2a09fb7e397032f9cfd318017b21b3dceb1a2f70f8 namespace=moby
Feb 26 10:32:18 functional-857474 dockerd[6477]: time="2024-02-26T10:32:18.507919035Z" level=info msg="cleaning up dead shim" namespace=moby
Feb 26 10:32:20 functional-857474 dockerd[6477]: time="2024-02-26T10:32:20.016884113Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 26 10:32:20 functional-857474 dockerd[6477]: time="2024-02-26T10:32:20.017093961Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:20 functional-857474 dockerd[6477]: time="2024-02-26T10:32:20.017118222Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 26 10:32:20 functional-857474 dockerd[6477]: time="2024-02-26T10:32:20.017130941Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:20 functional-857474 cri-dockerd[6700]: time="2024-02-26T10:32:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ff502c12ef79dd99c320fac84895c006f8d6b84befa1cf9ca65ee008013fd7e2/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.912039525Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.912843808Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.913100492Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.913471715Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.966538793Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.966776364Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.966804734Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 26 10:32:23 functional-857474 dockerd[6477]: time="2024-02-26T10:32:23.966819728Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:24 functional-857474 dockerd[6477]: time="2024-02-26T10:32:24.384400683Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 26 10:32:24 functional-857474 dockerd[6477]: time="2024-02-26T10:32:24.384528488Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 26 10:32:24 functional-857474 dockerd[6477]: time="2024-02-26T10:32:24.384551429Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 26 10:32:24 functional-857474 dockerd[6477]: time="2024-02-26T10:32:24.384564247Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
40127dd5309fb 6e38f40d628db 25 seconds ago Running storage-provisioner 2 81785dbad3ba2 storage-provisioner
328cc8969577b ead0a4a53df89 25 seconds ago Running coredns 2 767ea5130efd9 coredns-5dd5756b68-vnv2s
1533c06204dd3 83f6cc407eed8 26 seconds ago Running kube-proxy 2 760954ffdc63b kube-proxy-95p5d
3ce4c98969063 73deb9a3f7025 31 seconds ago Running etcd 2 23e4e28928b31 etcd-functional-857474
ad10bc0800978 e3db313c6dbc0 32 seconds ago Running kube-scheduler 2 a14d4603e5632 kube-scheduler-functional-857474
3ba85e169876f 7fe0e6f37db33 32 seconds ago Running kube-apiserver 0 82a091e9c7369 kube-apiserver-functional-857474
fa5c3b0a2ddb2 d058aa5ab969c 33 seconds ago Running kube-controller-manager 2 c4a6455245cf5 kube-controller-manager-functional-857474
b315d1bf43cf2 6e38f40d628db About a minute ago Exited storage-provisioner 1 2471455fc9a37 storage-provisioner
4c4c42268e0c2 e3db313c6dbc0 About a minute ago Exited kube-scheduler 1 7c69c5f66a143 kube-scheduler-functional-857474
eac178e2ce790 ead0a4a53df89 About a minute ago Exited coredns 1 e221a95884dc0 coredns-5dd5756b68-vnv2s
5adebf3b569b1 73deb9a3f7025 About a minute ago Exited etcd 1 c6dfc131c09b6 etcd-functional-857474
075ed6dc39fbc d058aa5ab969c About a minute ago Exited kube-controller-manager 1 2b267546f0c89 kube-controller-manager-functional-857474
a70d991ff697a 83f6cc407eed8 About a minute ago Exited kube-proxy 1 3b40a63a7c02e kube-proxy-95p5d
b8958b5669026 7fe0e6f37db33 About a minute ago Exited kube-apiserver 1 b45c31fbc947e kube-apiserver-functional-857474
==> coredns [328cc8969577] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6c8bd46af3d98e03c4ae8e438c65dd0c69a5f817565481bcf1725dd66ff794963b7938c81e3a23d4c2ad9e52f818076e819219c79e8007dd90564767ed68ba4c
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:53778 - 50581 "HINFO IN 7863195941770471242.6214584127982040424. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.022028944s
==> coredns [eac178e2ce79] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6c8bd46af3d98e03c4ae8e438c65dd0c69a5f817565481bcf1725dd66ff794963b7938c81e3a23d4c2ad9e52f818076e819219c79e8007dd90564767ed68ba4c
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:34858 - 26155 "HINFO IN 4408902536723854879.7814752514980301813. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.020603478s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> describe nodes <==
Name: functional-857474
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=functional-857474
kubernetes.io/os=linux
minikube.k8s.io/commit=4011915ad0e9b27ff42994854397cc2ed93516c6
minikube.k8s.io/name=functional-857474
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_02_26T10_30_35_0700
minikube.k8s.io/version=v1.32.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 26 Feb 2024 10:30:31 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: functional-857474
AcquireTime: <unset>
RenewTime: Mon, 26 Feb 2024 10:32:17 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 26 Feb 2024 10:31:57 +0000 Mon, 26 Feb 2024 10:30:30 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 26 Feb 2024 10:31:57 +0000 Mon, 26 Feb 2024 10:30:30 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 26 Feb 2024 10:31:57 +0000 Mon, 26 Feb 2024 10:30:30 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 26 Feb 2024 10:31:57 +0000 Mon, 26 Feb 2024 10:30:37 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.39.23
Hostname: functional-857474
Capacity:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 3914496Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 3914496Ki
pods: 110
System Info:
Machine ID: a26a8b25c88f4471b2cca63ca381cccc
System UUID: a26a8b25-c88f-4471-b2cc-a63ca381cccc
Boot ID: e98e5698-ff62-4eaa-ab92-897850f05269
Kernel Version: 5.10.57
OS Image: Buildroot 2021.02.12
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://24.0.7
Kubelet Version: v1.28.4
Kube-Proxy Version: v1.28.4
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-mount 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2s
default hello-node-d7447cc7f-fxlzm 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 6s
kube-system coredns-5dd5756b68-vnv2s 100m (5%!)(MISSING) 0 (0%!)(MISSING) 70Mi (1%!)(MISSING) 170Mi (4%!)(MISSING) 97s
kube-system etcd-functional-857474 100m (5%!)(MISSING) 0 (0%!)(MISSING) 100Mi (2%!)(MISSING) 0 (0%!)(MISSING) 110s
kube-system kube-apiserver-functional-857474 250m (12%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 27s
kube-system kube-controller-manager-functional-857474 200m (10%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 111s
kube-system kube-proxy-95p5d 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 97s
kube-system kube-scheduler-functional-857474 100m (5%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 111s
kube-system storage-provisioner 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 95s
kubernetes-dashboard dashboard-metrics-scraper-7fd5cb4ddc-gk4qv 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3s
kubernetes-dashboard kubernetes-dashboard-8694d4445c-s9996 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%!)(MISSING) 0 (0%!)(MISSING)
memory 170Mi (4%!)(MISSING) 170Mi (4%!)(MISSING)
ephemeral-storage 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-2Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 95s kube-proxy
Normal Starting 25s kube-proxy
Normal Starting 67s kube-proxy
Normal NodeHasSufficientPID 118s (x7 over 118s) kubelet Node functional-857474 status is now: NodeHasSufficientPID
Normal NodeHasNoDiskPressure 118s (x8 over 118s) kubelet Node functional-857474 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientMemory 118s (x8 over 118s) kubelet Node functional-857474 status is now: NodeHasSufficientMemory
Normal NodeAllocatableEnforced 118s kubelet Updated Node Allocatable limit across pods
Normal Starting 110s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 110s kubelet Node functional-857474 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 110s kubelet Node functional-857474 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 110s kubelet Node functional-857474 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 110s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 108s kubelet Node functional-857474 status is now: NodeReady
Normal RegisteredNode 98s node-controller Node functional-857474 event: Registered Node functional-857474 in Controller
Normal NodeNotReady 90s kubelet Node functional-857474 status is now: NodeNotReady
Normal RegisteredNode 55s node-controller Node functional-857474 event: Registered Node functional-857474 in Controller
Normal Starting 34s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 34s (x8 over 34s) kubelet Node functional-857474 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 34s (x8 over 34s) kubelet Node functional-857474 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 34s (x7 over 34s) kubelet Node functional-857474 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 34s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 15s node-controller Node functional-857474 event: Registered Node functional-857474 in Controller
==> dmesg <==
[ +3.774415] systemd-fstab-generator[1498]: Ignoring "noauto" for root device
[ +7.756164] systemd-fstab-generator[2400]: Ignoring "noauto" for root device
[ +14.985535] kauditd_printk_skb: 39 callbacks suppressed
[ +4.365710] systemd-fstab-generator[3520]: Ignoring "noauto" for root device
[ +0.306012] systemd-fstab-generator[3554]: Ignoring "noauto" for root device
[ +0.157826] systemd-fstab-generator[3565]: Ignoring "noauto" for root device
[ +0.152844] systemd-fstab-generator[3578]: Ignoring "noauto" for root device
[ +5.211958] kauditd_printk_skb: 25 callbacks suppressed
[Feb26 10:31] systemd-fstab-generator[4171]: Ignoring "noauto" for root device
[ +0.117605] systemd-fstab-generator[4182]: Ignoring "noauto" for root device
[ +0.113102] systemd-fstab-generator[4193]: Ignoring "noauto" for root device
[ +0.140194] systemd-fstab-generator[4208]: Ignoring "noauto" for root device
[ +7.486923] kauditd_printk_skb: 29 callbacks suppressed
[ +21.647476] systemd-fstab-generator[5878]: Ignoring "noauto" for root device
[ +0.369564] systemd-fstab-generator[6007]: Ignoring "noauto" for root device
[ +0.151801] systemd-fstab-generator[6018]: Ignoring "noauto" for root device
[ +0.161888] systemd-fstab-generator[6031]: Ignoring "noauto" for root device
[ +12.022250] systemd-fstab-generator[6649]: Ignoring "noauto" for root device
[ +0.114665] systemd-fstab-generator[6660]: Ignoring "noauto" for root device
[ +0.102234] systemd-fstab-generator[6671]: Ignoring "noauto" for root device
[ +0.137817] systemd-fstab-generator[6686]: Ignoring "noauto" for root device
[ +1.896773] systemd-fstab-generator[6955]: Ignoring "noauto" for root device
[ +8.698834] kauditd_printk_skb: 29 callbacks suppressed
[Feb26 10:32] kauditd_printk_skb: 11 callbacks suppressed
[ +5.048953] kauditd_printk_skb: 13 callbacks suppressed
==> etcd [3ce4c9896906] <==
{"level":"info","ts":"2024-02-26T10:31:54.51707Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.39.23:2380"}
{"level":"info","ts":"2024-02-26T10:31:54.517113Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.39.23:2380"}
{"level":"info","ts":"2024-02-26T10:31:54.517292Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"c6baa4636f442c95","initial-advertise-peer-urls":["https://192.168.39.23:2380"],"listen-peer-urls":["https://192.168.39.23:2380"],"advertise-client-urls":["https://192.168.39.23:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.23:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2024-02-26T10:31:54.517414Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-02-26T10:31:55.989312Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 is starting a new election at term 3"}
{"level":"info","ts":"2024-02-26T10:31:55.989358Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became pre-candidate at term 3"}
{"level":"info","ts":"2024-02-26T10:31:55.989396Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 received MsgPreVoteResp from c6baa4636f442c95 at term 3"}
{"level":"info","ts":"2024-02-26T10:31:55.989413Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became candidate at term 4"}
{"level":"info","ts":"2024-02-26T10:31:55.989418Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 received MsgVoteResp from c6baa4636f442c95 at term 4"}
{"level":"info","ts":"2024-02-26T10:31:55.989426Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became leader at term 4"}
{"level":"info","ts":"2024-02-26T10:31:55.989433Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: c6baa4636f442c95 elected leader c6baa4636f442c95 at term 4"}
{"level":"info","ts":"2024-02-26T10:31:55.995137Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-26T10:31:55.995366Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-26T10:31:55.996704Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.23:2379"}
{"level":"info","ts":"2024-02-26T10:31:55.996864Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-02-26T10:31:55.996946Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-02-26T10:31:55.99703Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-02-26T10:31:55.995141Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"c6baa4636f442c95","local-member-attributes":"{Name:functional-857474 ClientURLs:[https://192.168.39.23:2379]}","request-path":"/0/members/c6baa4636f442c95/attributes","cluster-id":"7d4cc2b8d7236707","publish-timeout":"7s"}
{"level":"info","ts":"2024-02-26T10:32:23.226227Z","caller":"traceutil/trace.go:171","msg":"trace[2000007113] linearizableReadLoop","detail":"{readStateIndex:757; appliedIndex:755; }","duration":"161.928712ms","start":"2024-02-26T10:32:23.064265Z","end":"2024-02-26T10:32:23.226194Z","steps":["trace[2000007113] 'read index received' (duration: 91.795236ms)","trace[2000007113] 'applied index is now lower than readState.Index' (duration: 70.132633ms)"],"step_count":2}
{"level":"info","ts":"2024-02-26T10:32:23.226465Z","caller":"traceutil/trace.go:171","msg":"trace[1652003069] transaction","detail":"{read_only:false; response_revision:708; number_of_response:1; }","duration":"162.363956ms","start":"2024-02-26T10:32:23.064086Z","end":"2024-02-26T10:32:23.22645Z","steps":["trace[1652003069] 'process raft request' (duration: 162.050069ms)"],"step_count":1}
{"level":"info","ts":"2024-02-26T10:32:23.226549Z","caller":"traceutil/trace.go:171","msg":"trace[333350230] transaction","detail":"{read_only:false; response_revision:707; number_of_response:1; }","duration":"170.922248ms","start":"2024-02-26T10:32:23.05561Z","end":"2024-02-26T10:32:23.226532Z","steps":["trace[333350230] 'process raft request' (duration: 100.440825ms)","trace[333350230] 'compare' (duration: 69.857039ms)"],"step_count":2}
{"level":"warn","ts":"2024-02-26T10:32:23.22686Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"162.44214ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/services/specs/kubernetes-dashboard/kubernetes-dashboard\" ","response":"range_response_count:0 size:5"}
{"level":"warn","ts":"2024-02-26T10:32:23.226918Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"125.695468ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kubernetes-dashboard/kubernetes-dashboard\" ","response":"range_response_count:1 size:957"}
{"level":"info","ts":"2024-02-26T10:32:23.226951Z","caller":"traceutil/trace.go:171","msg":"trace[1622146799] range","detail":"{range_begin:/registry/services/specs/kubernetes-dashboard/kubernetes-dashboard; range_end:; response_count:0; response_revision:708; }","duration":"162.687154ms","start":"2024-02-26T10:32:23.06425Z","end":"2024-02-26T10:32:23.226937Z","steps":["trace[1622146799] 'agreement among raft nodes before linearized reading' (duration: 162.39518ms)"],"step_count":1}
{"level":"info","ts":"2024-02-26T10:32:23.227044Z","caller":"traceutil/trace.go:171","msg":"trace[330422240] range","detail":"{range_begin:/registry/serviceaccounts/kubernetes-dashboard/kubernetes-dashboard; range_end:; response_count:1; response_revision:708; }","duration":"125.775414ms","start":"2024-02-26T10:32:23.101175Z","end":"2024-02-26T10:32:23.226951Z","steps":["trace[330422240] 'agreement among raft nodes before linearized reading' (duration: 125.662827ms)"],"step_count":1}
==> etcd [5adebf3b569b] <==
{"level":"info","ts":"2024-02-26T10:31:15.079217Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.39.23:2380"}
{"level":"info","ts":"2024-02-26T10:31:16.557202Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 is starting a new election at term 2"}
{"level":"info","ts":"2024-02-26T10:31:16.557529Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became pre-candidate at term 2"}
{"level":"info","ts":"2024-02-26T10:31:16.557864Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 received MsgPreVoteResp from c6baa4636f442c95 at term 2"}
{"level":"info","ts":"2024-02-26T10:31:16.557949Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became candidate at term 3"}
{"level":"info","ts":"2024-02-26T10:31:16.558123Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 received MsgVoteResp from c6baa4636f442c95 at term 3"}
{"level":"info","ts":"2024-02-26T10:31:16.558253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"c6baa4636f442c95 became leader at term 3"}
{"level":"info","ts":"2024-02-26T10:31:16.558379Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: c6baa4636f442c95 elected leader c6baa4636f442c95 at term 3"}
{"level":"info","ts":"2024-02-26T10:31:16.561165Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"c6baa4636f442c95","local-member-attributes":"{Name:functional-857474 ClientURLs:[https://192.168.39.23:2379]}","request-path":"/0/members/c6baa4636f442c95/attributes","cluster-id":"7d4cc2b8d7236707","publish-timeout":"7s"}
{"level":"info","ts":"2024-02-26T10:31:16.561168Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-26T10:31:16.561201Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-26T10:31:16.562746Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-02-26T10:31:16.561367Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-02-26T10:31:16.563009Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-02-26T10:31:16.563375Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.23:2379"}
{"level":"info","ts":"2024-02-26T10:31:37.065535Z","caller":"osutil/interrupt_unix.go:64","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2024-02-26T10:31:37.065577Z","caller":"embed/etcd.go:376","msg":"closing etcd server","name":"functional-857474","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.23:2380"],"advertise-client-urls":["https://192.168.39.23:2379"]}
{"level":"warn","ts":"2024-02-26T10:31:37.065699Z","caller":"embed/serve.go:212","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2024-02-26T10:31:37.065806Z","caller":"embed/serve.go:214","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2024-02-26T10:31:37.085181Z","caller":"embed/serve.go:212","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.23:2379: use of closed network connection"}
{"level":"warn","ts":"2024-02-26T10:31:37.08522Z","caller":"embed/serve.go:214","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.23:2379: use of closed network connection"}
{"level":"info","ts":"2024-02-26T10:31:37.085265Z","caller":"etcdserver/server.go:1465","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"c6baa4636f442c95","current-leader-member-id":"c6baa4636f442c95"}
{"level":"info","ts":"2024-02-26T10:31:37.093302Z","caller":"embed/etcd.go:579","msg":"stopping serving peer traffic","address":"192.168.39.23:2380"}
{"level":"info","ts":"2024-02-26T10:31:37.093403Z","caller":"embed/etcd.go:584","msg":"stopped serving peer traffic","address":"192.168.39.23:2380"}
{"level":"info","ts":"2024-02-26T10:31:37.093413Z","caller":"embed/etcd.go:378","msg":"closed etcd server","name":"functional-857474","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.23:2380"],"advertise-client-urls":["https://192.168.39.23:2379"]}
==> kernel <==
10:32:26 up 2 min, 0 users, load average: 2.36, 0.97, 0.36
Linux functional-857474 5.10.57 #1 SMP Thu Feb 15 22:26:06 UTC 2024 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2021.02.12"
==> kube-apiserver [3ba85e169876] <==
I0226 10:31:57.564859 1 cache.go:39] Caches are synced for AvailableConditionController controller
I0226 10:31:57.564861 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0226 10:31:57.565542 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I0226 10:31:57.566938 1 shared_informer.go:318] Caches are synced for crd-autoregister
I0226 10:31:57.567038 1 aggregator.go:166] initial CRD sync complete...
I0226 10:31:57.567045 1 autoregister_controller.go:141] Starting autoregister controller
I0226 10:31:57.567049 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0226 10:31:57.567055 1 cache.go:39] Caches are synced for autoregister controller
I0226 10:31:57.572064 1 apf_controller.go:377] Running API Priority and Fairness config worker
I0226 10:31:57.572077 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
E0226 10:31:57.585542 1 controller.go:97] Error removing old endpoints from kubernetes service: no API server IP addresses were listed in storage, refusing to erase all endpoints for the kubernetes Service
I0226 10:31:58.378173 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0226 10:31:59.257479 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I0226 10:31:59.276116 1 controller.go:624] quota admission added evaluator for: deployments.apps
I0226 10:31:59.345729 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I0226 10:31:59.384782 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0226 10:31:59.393532 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0226 10:32:09.959493 1 controller.go:624] quota admission added evaluator for: endpoints
I0226 10:32:10.026282 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0226 10:32:15.048321 1 alloc.go:330] "allocated clusterIPs" service="default/invalid-svc" clusterIPs={"IPv4":"10.106.150.81"}
I0226 10:32:19.466808 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I0226 10:32:19.595405 1 alloc.go:330] "allocated clusterIPs" service="default/hello-node" clusterIPs={"IPv4":"10.111.239.233"}
I0226 10:32:22.619165 1 controller.go:624] quota admission added evaluator for: namespaces
I0226 10:32:23.270940 1 alloc.go:330] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.105.225.130"}
I0226 10:32:23.323088 1 alloc.go:330] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.110.183.172"}
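The alloc.go entries above show ClusterIPs being handed out for the invalid-svc, hello-node, kubernetes-dashboard and dashboard-metrics-scraper Services, so Service creation itself went through on the new apiserver. A hedged cross-check against the live cluster (context name taken from this log; the profile may already be torn down):

    kubectl --context functional-857474 -n kubernetes-dashboard get svc -o wide
    kubectl --context functional-857474 -n default get svc -o wide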
==> kube-apiserver [b8958b566902] <==
W0226 10:31:46.508364 1 logging.go:59] [core] [Channel #145 SubChannel #146] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.533803 1 logging.go:59] [core] [Channel #2 SubChannel #4] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.545697 1 logging.go:59] [core] [Channel #70 SubChannel #71] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.618732 1 logging.go:59] [core] [Channel #112 SubChannel #113] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.622521 1 logging.go:59] [core] [Channel #157 SubChannel #158] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.630374 1 logging.go:59] [core] [Channel #25 SubChannel #26] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.665613 1 logging.go:59] [core] [Channel #49 SubChannel #50] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.678740 1 logging.go:59] [core] [Channel #91 SubChannel #92] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.745832 1 logging.go:59] [core] [Channel #97 SubChannel #98] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.770107 1 logging.go:59] [core] [Channel #64 SubChannel #65] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.784702 1 logging.go:59] [core] [Channel #178 SubChannel #179] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.791510 1 logging.go:59] [core] [Channel #160 SubChannel #161] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.832165 1 logging.go:59] [core] [Channel #142 SubChannel #143] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.857048 1 logging.go:59] [core] [Channel #76 SubChannel #77] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.883779 1 logging.go:59] [core] [Channel #166 SubChannel #167] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.943209 1 logging.go:59] [core] [Channel #133 SubChannel #134] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.959835 1 logging.go:59] [core] [Channel #5 SubChannel #6] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.977448 1 logging.go:59] [core] [Channel #136 SubChannel #137] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:46.985636 1 logging.go:59] [core] [Channel #31 SubChannel #32] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.022357 1 logging.go:59] [core] [Channel #46 SubChannel #47] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.025849 1 logging.go:59] [core] [Channel #130 SubChannel #131] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.036304 1 logging.go:59] [core] [Channel #22 SubChannel #23] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.043131 1 logging.go:59] [core] [Channel #82 SubChannel #83] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.064438 1 logging.go:59] [core] [Channel #73 SubChannel #74] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W0226 10:31:47.179587 1 logging.go:59] [core] [Channel #163 SubChannel #164] grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-controller-manager [075ed6dc39fb] <==
I0226 10:31:30.818288 1 taint_manager.go:205] "Starting NoExecuteTaintManager"
I0226 10:31:30.818409 1 taint_manager.go:210] "Sending events to api server"
I0226 10:31:30.818528 1 event.go:307] "Event occurred" object="functional-857474" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node functional-857474 event: Registered Node functional-857474 in Controller"
I0226 10:31:30.833030 1 shared_informer.go:318] Caches are synced for endpoint_slice_mirroring
I0226 10:31:30.834560 1 shared_informer.go:318] Caches are synced for job
I0226 10:31:30.836859 1 shared_informer.go:318] Caches are synced for TTL
I0226 10:31:30.837160 1 shared_informer.go:318] Caches are synced for HPA
I0226 10:31:30.839314 1 shared_informer.go:318] Caches are synced for PV protection
I0226 10:31:30.848273 1 shared_informer.go:318] Caches are synced for endpoint_slice
I0226 10:31:30.851852 1 shared_informer.go:318] Caches are synced for GC
I0226 10:31:30.854745 1 shared_informer.go:318] Caches are synced for daemon sets
I0226 10:31:30.872593 1 shared_informer.go:318] Caches are synced for cronjob
I0226 10:31:30.897714 1 shared_informer.go:318] Caches are synced for attach detach
I0226 10:31:30.928257 1 shared_informer.go:318] Caches are synced for persistent volume
I0226 10:31:30.933139 1 shared_informer.go:318] Caches are synced for PVC protection
I0226 10:31:30.936653 1 shared_informer.go:318] Caches are synced for stateful set
I0226 10:31:30.945037 1 shared_informer.go:318] Caches are synced for ephemeral
I0226 10:31:30.956393 1 shared_informer.go:318] Caches are synced for resource quota
I0226 10:31:30.958465 1 shared_informer.go:318] Caches are synced for deployment
I0226 10:31:30.964921 1 shared_informer.go:318] Caches are synced for disruption
I0226 10:31:30.968244 1 shared_informer.go:318] Caches are synced for resource quota
I0226 10:31:30.970543 1 shared_informer.go:318] Caches are synced for expand
I0226 10:31:31.386835 1 shared_informer.go:318] Caches are synced for garbage collector
I0226 10:31:31.395280 1 shared_informer.go:318] Caches are synced for garbage collector
I0226 10:31:31.395340 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
==> kube-controller-manager [fa5c3b0a2ddb] <==
I0226 10:32:22.857015 1 event.go:307] "Event occurred" object="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"dashboard-metrics-scraper-7fd5cb4ddc-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.865521 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="21.299957ms"
E0226 10:32:22.865561 1 replica_set.go:557] sync "kubernetes-dashboard/kubernetes-dashboard-8694d4445c" failed with pods "kubernetes-dashboard-8694d4445c-" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount "kubernetes-dashboard" not found
I0226 10:32:22.866261 1 event.go:307] "Event occurred" object="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"kubernetes-dashboard-8694d4445c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.871092 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="14.351015ms"
E0226 10:32:22.871135 1 replica_set.go:557] sync "kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" failed with pods "dashboard-metrics-scraper-7fd5cb4ddc-" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount "kubernetes-dashboard" not found
I0226 10:32:22.871191 1 event.go:307] "Event occurred" object="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"dashboard-metrics-scraper-7fd5cb4ddc-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.881760 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="16.164609ms"
E0226 10:32:22.881781 1 replica_set.go:557] sync "kubernetes-dashboard/kubernetes-dashboard-8694d4445c" failed with pods "kubernetes-dashboard-8694d4445c-" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount "kubernetes-dashboard" not found
I0226 10:32:22.882119 1 event.go:307] "Event occurred" object="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"kubernetes-dashboard-8694d4445c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.892115 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="10.302699ms"
E0226 10:32:22.892134 1 replica_set.go:557] sync "kubernetes-dashboard/kubernetes-dashboard-8694d4445c" failed with pods "kubernetes-dashboard-8694d4445c-" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount "kubernetes-dashboard" not found
I0226 10:32:22.892170 1 event.go:307] "Event occurred" object="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"kubernetes-dashboard-8694d4445c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.892284 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="14.500545ms"
E0226 10:32:22.892291 1 replica_set.go:557] sync "kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" failed with pods "dashboard-metrics-scraper-7fd5cb4ddc-" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount "kubernetes-dashboard" not found
I0226 10:32:22.892310 1 event.go:307] "Event occurred" object="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Warning" reason="FailedCreate" message="Error creating: pods \"dashboard-metrics-scraper-7fd5cb4ddc-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found"
I0226 10:32:22.923275 1 event.go:307] "Event occurred" object="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kubernetes-dashboard-8694d4445c-s9996"
I0226 10:32:22.951827 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="48.932696ms"
I0226 10:32:22.992117 1 event.go:307] "Event occurred" object="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: dashboard-metrics-scraper-7fd5cb4ddc-gk4qv"
I0226 10:32:23.006506 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="54.623652ms"
I0226 10:32:23.006590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/kubernetes-dashboard-8694d4445c" duration="52.308µs"
I0226 10:32:23.020736 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="46.461518ms"
I0226 10:32:23.037452 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="16.674441ms"
I0226 10:32:23.037543 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="54.739µs"
I0226 10:32:23.230577 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc" duration="56.033µs"
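The FailedCreate/SuccessfulCreate sequence above is an ordering race during the dashboard addon apply: the ReplicaSets exist before the kubernetes-dashboard ServiceAccount does, the controller keeps retrying, and both pods are created within roughly 100 ms of the last failure at 10:32:22.89 once the ServiceAccount shows up. If the profile is still reachable, a hedged manual cross-check (context name taken from this log) is:

    kubectl --context functional-857474 -n kubernetes-dashboard get serviceaccounts,replicasets,pods
    kubectl --context functional-857474 -n kubernetes-dashboard get events --sort-by=.lastTimestamp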
==> kube-proxy [1533c06204dd] <==
I0226 10:31:59.840221 1 server_others.go:69] "Using iptables proxy"
I0226 10:31:59.861545 1 node.go:141] Successfully retrieved node IP: 192.168.39.23
I0226 10:32:00.098104 1 server_others.go:121] "No iptables support for family" ipFamily="IPv6"
I0226 10:32:00.098127 1 server.go:634] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I0226 10:32:00.110836 1 server_others.go:152] "Using iptables Proxier"
I0226 10:32:00.110926 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0226 10:32:00.111716 1 server.go:846] "Version info" version="v1.28.4"
I0226 10:32:00.111735 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0226 10:32:00.113938 1 config.go:188] "Starting service config controller"
I0226 10:32:00.114046 1 shared_informer.go:311] Waiting for caches to sync for service config
I0226 10:32:00.114071 1 config.go:97] "Starting endpoint slice config controller"
I0226 10:32:00.114075 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I0226 10:32:00.121412 1 config.go:315] "Starting node config controller"
I0226 10:32:00.122439 1 shared_informer.go:311] Waiting for caches to sync for node config
I0226 10:32:00.214685 1 shared_informer.go:318] Caches are synced for endpoint slice config
I0226 10:32:00.214734 1 shared_informer.go:318] Caches are synced for service config
I0226 10:32:00.222505 1 shared_informer.go:318] Caches are synced for node config
==> kube-proxy [a70d991ff697] <==
I0226 10:31:15.414726 1 server_others.go:69] "Using iptables proxy"
I0226 10:31:18.233701 1 node.go:141] Successfully retrieved node IP: 192.168.39.23
I0226 10:31:18.342205 1 server_others.go:121] "No iptables support for family" ipFamily="IPv6"
I0226 10:31:18.342255 1 server.go:634] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I0226 10:31:18.348427 1 server_others.go:152] "Using iptables Proxier"
I0226 10:31:18.348489 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0226 10:31:18.348934 1 server.go:846] "Version info" version="v1.28.4"
I0226 10:31:18.349053 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0226 10:31:18.350408 1 config.go:188] "Starting service config controller"
I0226 10:31:18.350603 1 shared_informer.go:311] Waiting for caches to sync for service config
I0226 10:31:18.350841 1 config.go:97] "Starting endpoint slice config controller"
I0226 10:31:18.350907 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I0226 10:31:18.351912 1 config.go:315] "Starting node config controller"
I0226 10:31:18.354571 1 shared_informer.go:311] Waiting for caches to sync for node config
I0226 10:31:18.451463 1 shared_informer.go:318] Caches are synced for endpoint slice config
I0226 10:31:18.451521 1 shared_informer.go:318] Caches are synced for service config
I0226 10:31:18.455323 1 shared_informer.go:318] Caches are synced for node config
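Both kube-proxy instances come up with the iptables proxier in single-stack IPv4 mode and set route_localnet=1, which is what lets NodePort services answer on localhost. If that sysctl ever needs verifying on the node, one option (assuming the profile is still running) is:

    out/minikube-linux-amd64 -p functional-857474 ssh -- cat /proc/sys/net/ipv4/conf/all/route_localnet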
==> kube-scheduler [4c4c42268e0c] <==
I0226 10:31:16.177510 1 serving.go:348] Generated self-signed cert in-memory
W0226 10:31:18.020933 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0226 10:31:18.021059 1 authentication.go:368] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0226 10:31:18.021070 1 authentication.go:369] Continuing without authentication configuration. This may treat all requests as anonymous.
W0226 10:31:18.021077 1 authentication.go:370] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0226 10:31:18.118908 1 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.4"
I0226 10:31:18.120918 1 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0226 10:31:18.160298 1 secure_serving.go:213] Serving securely on 127.0.0.1:10259
I0226 10:31:18.160584 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0226 10:31:18.160625 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0226 10:31:18.160725 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0226 10:31:18.261685 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0226 10:31:37.096424 1 secure_serving.go:258] Stopped listening on 127.0.0.1:10259
I0226 10:31:37.099180 1 tlsconfig.go:255] "Shutting down DynamicServingCertificateController"
E0226 10:31:37.099368 1 run.go:74] "command failed" err="finished without leader elect"
==> kube-scheduler [ad10bc080097] <==
I0226 10:31:54.550205 1 serving.go:348] Generated self-signed cert in-memory
W0226 10:31:57.463317 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0226 10:31:57.463361 1 authentication.go:368] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0226 10:31:57.463372 1 authentication.go:369] Continuing without authentication configuration. This may treat all requests as anonymous.
W0226 10:31:57.463378 1 authentication.go:370] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0226 10:31:57.495097 1 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.4"
I0226 10:31:57.495118 1 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0226 10:31:57.499785 1 secure_serving.go:213] Serving securely on 127.0.0.1:10259
I0226 10:31:57.503664 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0226 10:31:57.503712 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0226 10:31:57.503735 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0226 10:31:57.604771 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
-- Journal begins at Mon 2024-02-26 10:30:00 UTC, ends at Mon 2024-02-26 10:32:27 UTC. --
Feb 26 10:32:15 functional-857474 kubelet[6961]: I0226 10:32:15.013683 6961 memory_manager.go:346] "RemoveStaleState removing state" podUID="f184dfcf5ccfbce2b7b34ff0b6595ee7" containerName="kube-apiserver"
Feb 26 10:32:15 functional-857474 kubelet[6961]: I0226 10:32:15.131889 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlbhs\" (UniqueName: \"kubernetes.io/projected/d2429e1c-30bb-4e06-8281-e3836769ccc9-kube-api-access-nlbhs\") pod \"invalid-svc\" (UID: \"d2429e1c-30bb-4e06-8281-e3836769ccc9\") " pod="default/invalid-svc"
Feb 26 10:32:16 functional-857474 kubelet[6961]: E0226 10:32:16.774790 6961 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: pull access denied for nonexistingimage, repository does not exist or may require 'docker login': denied: requested access to the resource is denied" image="nonexistingimage:latest"
Feb 26 10:32:16 functional-857474 kubelet[6961]: E0226 10:32:16.774847 6961 kuberuntime_image.go:53] "Failed to pull image" err="Error response from daemon: pull access denied for nonexistingimage, repository does not exist or may require 'docker login': denied: requested access to the resource is denied" image="nonexistingimage:latest"
Feb 26 10:32:16 functional-857474 kubelet[6961]: E0226 10:32:16.775020 6961 kuberuntime_manager.go:1261] container &Container{Name:nginx,Image:nonexistingimage:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nlbhs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod invalid-svc_default(d2429e1c-30bb-4e06-8281-e3836769ccc9):
ErrImagePull: Error response from daemon: pull access denied for nonexistingimage, repository does not exist or may require 'docker login': denied: requested access to the resource is denied
Feb 26 10:32:16 functional-857474 kubelet[6961]: E0226 10:32:16.775071 6961 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nginx\" with ErrImagePull: \"Error response from daemon: pull access denied for nonexistingimage, repository does not exist or may require 'docker login': denied: requested access to the resource is denied\"" pod="default/invalid-svc" podUID="d2429e1c-30bb-4e06-8281-e3836769ccc9"
Feb 26 10:32:17 functional-857474 kubelet[6961]: E0226 10:32:17.332555 6961 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nginx\" with ImagePullBackOff: \"Back-off pulling image \\\"nonexistingimage:latest\\\"\"" pod="default/invalid-svc" podUID="d2429e1c-30bb-4e06-8281-e3836769ccc9"
Feb 26 10:32:18 functional-857474 kubelet[6961]: I0226 10:32:18.655255 6961 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlbhs\" (UniqueName: \"kubernetes.io/projected/d2429e1c-30bb-4e06-8281-e3836769ccc9-kube-api-access-nlbhs\") pod \"d2429e1c-30bb-4e06-8281-e3836769ccc9\" (UID: \"d2429e1c-30bb-4e06-8281-e3836769ccc9\") "
Feb 26 10:32:18 functional-857474 kubelet[6961]: I0226 10:32:18.661784 6961 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2429e1c-30bb-4e06-8281-e3836769ccc9-kube-api-access-nlbhs" (OuterVolumeSpecName: "kube-api-access-nlbhs") pod "d2429e1c-30bb-4e06-8281-e3836769ccc9" (UID: "d2429e1c-30bb-4e06-8281-e3836769ccc9"). InnerVolumeSpecName "kube-api-access-nlbhs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 26 10:32:18 functional-857474 kubelet[6961]: I0226 10:32:18.756280 6961 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-nlbhs\" (UniqueName: \"kubernetes.io/projected/d2429e1c-30bb-4e06-8281-e3836769ccc9-kube-api-access-nlbhs\") on node \"functional-857474\" DevicePath \"\""
Feb 26 10:32:19 functional-857474 kubelet[6961]: I0226 10:32:19.502597 6961 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="d2429e1c-30bb-4e06-8281-e3836769ccc9" path="/var/lib/kubelet/pods/d2429e1c-30bb-4e06-8281-e3836769ccc9/volumes"
Feb 26 10:32:19 functional-857474 kubelet[6961]: I0226 10:32:19.520776 6961 topology_manager.go:215] "Topology Admit Handler" podUID="1e1818bb-b8d3-489e-91e3-f0c8faf2749d" podNamespace="default" podName="hello-node-d7447cc7f-fxlzm"
Feb 26 10:32:19 functional-857474 kubelet[6961]: I0226 10:32:19.521074 6961 memory_manager.go:346] "RemoveStaleState removing state" podUID="f184dfcf5ccfbce2b7b34ff0b6595ee7" containerName="kube-apiserver"
Feb 26 10:32:19 functional-857474 kubelet[6961]: I0226 10:32:19.664885 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5pf6\" (UniqueName: \"kubernetes.io/projected/1e1818bb-b8d3-489e-91e3-f0c8faf2749d-kube-api-access-b5pf6\") pod \"hello-node-d7447cc7f-fxlzm\" (UID: \"1e1818bb-b8d3-489e-91e3-f0c8faf2749d\") " pod="default/hello-node-d7447cc7f-fxlzm"
Feb 26 10:32:20 functional-857474 kubelet[6961]: I0226 10:32:20.653918 6961 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff502c12ef79dd99c320fac84895c006f8d6b84befa1cf9ca65ee008013fd7e2"
Feb 26 10:32:22 functional-857474 kubelet[6961]: I0226 10:32:22.939953 6961 topology_manager.go:215] "Topology Admit Handler" podUID="b8323d98-0639-462c-a391-3666ad9b8ff5" podNamespace="kubernetes-dashboard" podName="kubernetes-dashboard-8694d4445c-s9996"
Feb 26 10:32:22 functional-857474 kubelet[6961]: I0226 10:32:22.997384 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-volume\" (UniqueName: \"kubernetes.io/empty-dir/b8323d98-0639-462c-a391-3666ad9b8ff5-tmp-volume\") pod \"kubernetes-dashboard-8694d4445c-s9996\" (UID: \"b8323d98-0639-462c-a391-3666ad9b8ff5\") " pod="kubernetes-dashboard/kubernetes-dashboard-8694d4445c-s9996"
Feb 26 10:32:22 functional-857474 kubelet[6961]: I0226 10:32:22.997446 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j42x\" (UniqueName: \"kubernetes.io/projected/b8323d98-0639-462c-a391-3666ad9b8ff5-kube-api-access-2j42x\") pod \"kubernetes-dashboard-8694d4445c-s9996\" (UID: \"b8323d98-0639-462c-a391-3666ad9b8ff5\") " pod="kubernetes-dashboard/kubernetes-dashboard-8694d4445c-s9996"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.014505 6961 topology_manager.go:215] "Topology Admit Handler" podUID="ccecc91e-7bb0-48be-9351-e84ff442b7f7" podNamespace="kubernetes-dashboard" podName="dashboard-metrics-scraper-7fd5cb4ddc-gk4qv"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.098432 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-volume\" (UniqueName: \"kubernetes.io/empty-dir/ccecc91e-7bb0-48be-9351-e84ff442b7f7-tmp-volume\") pod \"dashboard-metrics-scraper-7fd5cb4ddc-gk4qv\" (UID: \"ccecc91e-7bb0-48be-9351-e84ff442b7f7\") " pod="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc-gk4qv"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.098605 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr2tw\" (UniqueName: \"kubernetes.io/projected/ccecc91e-7bb0-48be-9351-e84ff442b7f7-kube-api-access-pr2tw\") pod \"dashboard-metrics-scraper-7fd5cb4ddc-gk4qv\" (UID: \"ccecc91e-7bb0-48be-9351-e84ff442b7f7\") " pod="kubernetes-dashboard/dashboard-metrics-scraper-7fd5cb4ddc-gk4qv"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.745738 6961 topology_manager.go:215] "Topology Admit Handler" podUID="46045731-f83a-44de-ac18-ca03d3ffce4f" podNamespace="default" podName="busybox-mount"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.816511 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-volume\" (UniqueName: \"kubernetes.io/host-path/46045731-f83a-44de-ac18-ca03d3ffce4f-test-volume\") pod \"busybox-mount\" (UID: \"46045731-f83a-44de-ac18-ca03d3ffce4f\") " pod="default/busybox-mount"
Feb 26 10:32:23 functional-857474 kubelet[6961]: I0226 10:32:23.816574 6961 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgmd4\" (UniqueName: \"kubernetes.io/projected/46045731-f83a-44de-ac18-ca03d3ffce4f-kube-api-access-kgmd4\") pod \"busybox-mount\" (UID: \"46045731-f83a-44de-ac18-ca03d3ffce4f\") " pod="default/busybox-mount"
Feb 26 10:32:24 functional-857474 kubelet[6961]: I0226 10:32:24.864662 6961 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0226f6aefc02ab68c70e17283bdf37523ca653968837aa0a57ce4d5a3207a7e"
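In the kubelet journal above, the ErrImagePull/ImagePullBackOff entries belong to the invalid-svc pod and its unpullable nonexistingimage:latest image, evidently from another test running in parallel; the dashboard and busybox-mount pods are admitted normally between 10:32:22 and 10:32:23. To pull just the dashboard-related kubelet entries off the node after the fact (hypothetical manual check; assumes the VM is still up):

    out/minikube-linux-amd64 -p functional-857474 ssh -- sudo journalctl -u kubelet --no-pager | grep kubernetes-dashboard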
==> storage-provisioner [40127dd5309f] <==
I0226 10:32:00.496632 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0226 10:32:00.506409 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0226 10:32:00.506645 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0226 10:32:17.910149 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0226 10:32:17.910632 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-857474_636feb40-a28f-45ac-bb44-369aaa4695ec!
I0226 10:32:17.911861 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"81a8d818-5dfe-47f1-b34a-56b77e9a997f", APIVersion:"v1", ResourceVersion:"618", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-857474_636feb40-a28f-45ac-bb44-369aaa4695ec became leader
I0226 10:32:18.011882 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-857474_636feb40-a28f-45ac-bb44-369aaa4695ec!
==> storage-provisioner [b315d1bf43cf] <==
I0226 10:31:16.061350 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0226 10:31:18.247349 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0226 10:31:18.247473 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0226 10:31:35.659372 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0226 10:31:35.660327 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"81a8d818-5dfe-47f1-b34a-56b77e9a997f", APIVersion:"v1", ResourceVersion:"509", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-857474_84e637ed-1464-4c44-8186-69bc44ac87ee became leader
I0226 10:31:35.661897 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-857474_84e637ed-1464-4c44-8186-69bc44ac87ee!
I0226 10:31:35.763209 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-857474_84e637ed-1464-4c44-8186-69bc44ac87ee!
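Both storage-provisioner instances coordinate through the kube-system/k8s.io-minikube-hostpath Endpoints lock; the newer instance (40127dd5309f) waits from 10:32:00 to 10:32:17 before acquiring it, presumably because the previous holder's lease had to expire after the restart. The current holder is recorded in that object's annotations and can be inspected with (hypothetical check):

    kubectl --context functional-857474 -n kube-system get endpoints k8s.io-minikube-hostpath -o yaml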
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-857474 -n functional-857474
helpers_test.go:261: (dbg) Run: kubectl --context functional-857474 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox-mount hello-node-d7447cc7f-fxlzm dashboard-metrics-scraper-7fd5cb4ddc-gk4qv kubernetes-dashboard-8694d4445c-s9996
helpers_test.go:274: ======> post-mortem[TestFunctional/parallel/DashboardCmd]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context functional-857474 describe pod busybox-mount hello-node-d7447cc7f-fxlzm dashboard-metrics-scraper-7fd5cb4ddc-gk4qv kubernetes-dashboard-8694d4445c-s9996
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context functional-857474 describe pod busybox-mount hello-node-d7447cc7f-fxlzm dashboard-metrics-scraper-7fd5cb4ddc-gk4qv kubernetes-dashboard-8694d4445c-s9996: exit status 1 (77.989309ms)
-- stdout --
Name: busybox-mount
Namespace: default
Priority: 0
Service Account: default
Node: functional-857474/192.168.39.23
Start Time: Mon, 26 Feb 2024 10:32:23 +0000
Labels: integration-test=busybox-mount
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Containers:
mount-munger:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
/bin/sh
-c
--
Args:
cat /mount-9p/created-by-test; echo test > /mount-9p/created-by-pod; rm /mount-9p/created-by-test-removed-by-pod; echo test > /mount-9p/created-by-pod-removed-by-test date >> /mount-9p/pod-dates
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/mount-9p from test-volume (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kgmd4 (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
test-volume:
Type: HostPath (bare host directory volume)
Path: /mount-9p
HostPathType:
kube-api-access-kgmd4:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 3s default-scheduler Successfully assigned default/busybox-mount to functional-857474
Normal Pulling 2s kubelet Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
Name: hello-node-d7447cc7f-fxlzm
Namespace: default
Priority: 0
Service Account: default
Node: functional-857474/192.168.39.23
Start Time: Mon, 26 Feb 2024 10:32:19 +0000
Labels: app=hello-node
pod-template-hash=d7447cc7f
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Controlled By: ReplicaSet/hello-node-d7447cc7f
Containers:
echoserver:
Container ID:
Image: registry.k8s.io/echoserver:1.8
Image ID:
Port: <none>
Host Port: <none>
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-b5pf6 (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-b5pf6:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 8s default-scheduler Successfully assigned default/hello-node-d7447cc7f-fxlzm to functional-857474
Normal Pulling 7s kubelet Pulling image "registry.k8s.io/echoserver:1.8"
Normal Pulled 0s kubelet Successfully pulled image "registry.k8s.io/echoserver:1.8" in 6.729s (6.729s including waiting)
-- /stdout --
** stderr **
Error from server (NotFound): pods "dashboard-metrics-scraper-7fd5cb4ddc-gk4qv" not found
Error from server (NotFound): pods "kubernetes-dashboard-8694d4445c-s9996" not found
** /stderr **
helpers_test.go:279: kubectl --context functional-857474 describe pod busybox-mount hello-node-d7447cc7f-fxlzm dashboard-metrics-scraper-7fd5cb4ddc-gk4qv kubernetes-dashboard-8694d4445c-s9996: exit status 1
--- FAIL: TestFunctional/parallel/DashboardCmd (7.38s)
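Two notes on the post-mortem: the describe at helpers_test.go:277 runs in the default namespace, so the two dashboard pods come back NotFound in stderr even though the controller-manager and kubelet logs above show them being created in the kubernetes-dashboard namespace; and per helpers_test.go:272 all four pods were still non-running when the post-mortem ran, with busybox-mount and hello-node only just pulling or finishing their image pulls. A manual follow-up sketch, assuming the functional-857474 profile is still available (flags are illustrative):

    kubectl --context functional-857474 -n kubernetes-dashboard describe pod kubernetes-dashboard-8694d4445c-s9996 dashboard-metrics-scraper-7fd5cb4ddc-gk4qv
    out/minikube-linux-amd64 -p functional-857474 dashboard --url --alsologtostderr -v=1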