Test Report: Docker_Linux_docker_arm64 21550

0aba0a8e31d541259ffdeb45c9650281430067b8:2025-09-17:41464

Failed tests (8/347)

TestAddons/parallel/LocalPath (345.79s)

=== RUN   TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath

=== CONT  TestAddons/parallel/LocalPath
addons_test.go:949: (dbg) Run:  kubectl --context addons-235235 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:955: (dbg) Run:  kubectl --context addons-235235 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:959: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc test-pvc -o jsonpath={.status.phase} -n default
[... the kubectl PVC phase poll above repeats verbatim; ~300 identical lines elided over the 5m0s wait ...]
helpers_test.go:402: (dbg) Non-zero exit: kubectl --context addons-235235 get pvc test-pvc -o jsonpath={.status.phase} -n default: context deadline exceeded (1.583µs)
helpers_test.go:404: TestAddons/parallel/LocalPath: WARNING: PVC get for "default" "test-pvc" returned: context deadline exceeded
addons_test.go:960: failed waiting for PVC test-pvc: context deadline exceeded
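For reference, the wait that timed out above follows a simple poll-until-Bound pattern: run the kubectl jsonpath query shown in the log, check whether the PVC phase is "Bound", and retry until the context deadline expires. The sketch below only illustrates that pattern; the function name, the 2-second interval, and the overall structure are assumptions, not the actual helpers_test.go implementation.

package main

import (
	"context"
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForPVCBound polls `kubectl get pvc` until the claim reports phase
// "Bound" or ctx expires. Names and the 2s interval are illustrative only.
func waitForPVCBound(ctx context.Context, kubeContext, ns, name string) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		out, err := exec.CommandContext(ctx, "kubectl", "--context", kubeContext,
			"get", "pvc", name, "-n", ns, "-o", "jsonpath={.status.phase}").Output()
		if err == nil && strings.TrimSpace(string(out)) == "Bound" {
			return nil // claim is bound; the test can proceed
		}
		select {
		case <-ctx.Done():
			// mirrors the "context deadline exceeded" failure in this report
			return fmt.Errorf("waiting for PVC %s/%s: %w", ns, name, ctx.Err())
		case <-ticker.C:
			// poll again
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	if err := waitForPVCBound(ctx, "addons-235235", "default", "test-pvc"); err != nil {
		fmt.Println(err)
	}
}

In this run the claim never reported Bound within 5m0s, so the wait returned the context deadline error recorded at addons_test.go:960, and the post-mortem below was collected.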
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestAddons/parallel/LocalPath]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======>  post-mortem[TestAddons/parallel/LocalPath]: docker inspect <======
helpers_test.go:239: (dbg) Run:  docker inspect addons-235235
helpers_test.go:243: (dbg) docker inspect addons-235235:

-- stdout --
	[
	    {
	        "Id": "d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2",
	        "Created": "2025-09-17T00:21:46.080276124Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 579441,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2025-09-17T00:21:46.144946221Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:3d6f74760dfc17060da5abc5d463d3d45b4ceea05955c9cc42b3ec56cb38cc48",
	        "ResolvConfPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/hostname",
	        "HostsPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/hosts",
	        "LogPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2-json.log",
	        "Name": "/addons-235235",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "addons-235235:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "addons-235235",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "ID": "d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2",
	                "LowerDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b-init/diff:/var/lib/docker/overlay2/6bf7b6c5df3b8adf86744064027446440589049694f02d12745ec1de281bdb92/diff",
	                "MergedDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/merged",
	                "UpperDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/diff",
	                "WorkDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "addons-235235",
	                "Source": "/var/lib/docker/volumes/addons-235235/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "addons-235235",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "addons-235235",
	                "name.minikube.sigs.k8s.io": "addons-235235",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "a43a40c4fa98667e767bf14a111b23f82aa9a3ec39c79413d4bd461cc7ae9299",
	            "SandboxKey": "/var/run/docker/netns/a43a40c4fa98",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33505"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33506"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33509"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33507"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33508"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "addons-235235": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "62:f3:eb:53:e2:f6",
	                    "DriverOpts": null,
	                    "GwPriority": 0,
	                    "NetworkID": "60650bbded5295efe88096468307a83afd002d873e3296f6c84175bd2508d292",
	                    "EndpointID": "edd46cd0c3a6ca4e5cb23dc4c256bcb2950d4889ac948e44e2d64fdfed41cc13",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DNSNames": [
	                        "addons-235235",
	                        "d707243db987"
	                    ]
	                }
	            }
	        }
	    }
	]

-- /stdout --
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p addons-235235 -n addons-235235
helpers_test.go:252: <<< TestAddons/parallel/LocalPath FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestAddons/parallel/LocalPath]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 logs -n 25: (1.366342078s)
helpers_test.go:260: TestAddons/parallel/LocalPath logs: 
-- stdout --
	
	==> Audit <==
	┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│ COMMAND │                                                                                                                                                                                                                                    ARGS                                                                                                                                                                                                                                    │        PROFILE         │  USER   │ VERSION │     START TIME      │      END TIME       │
	├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ delete  │ -p download-docker-416078                                                                                                                                                                                                                                                                                                                                                                                                                                                  │ download-docker-416078 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
	│ start   │ --download-only -p binary-mirror-287818 --alsologtostderr --binary-mirror http://127.0.0.1:36367 --driver=docker  --container-runtime=docker                                                                                                                                                                                                                                                                                                                               │ binary-mirror-287818   │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │                     │
	│ delete  │ -p binary-mirror-287818                                                                                                                                                                                                                                                                                                                                                                                                                                                    │ binary-mirror-287818   │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
	│ addons  │ enable dashboard -p addons-235235                                                                                                                                                                                                                                                                                                                                                                                                                                          │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │                     │
	│ addons  │ disable dashboard -p addons-235235                                                                                                                                                                                                                                                                                                                                                                                                                                         │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │                     │
	│ start   │ -p addons-235235 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:23 UTC │
	│ addons  │ addons-235235 addons disable volcano --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                                │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
	│ addons  │ addons-235235 addons disable gcp-auth --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                               │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
	│ addons  │ enable headlamp -p addons-235235 --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                                    │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
	│ addons  │ addons-235235 addons disable headlamp --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                               │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ ip      │ addons-235235 ip                                                                                                                                                                                                                                                                                                                                                                                                                                                           │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable registry --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                               │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable metrics-server --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                         │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable inspektor-gadget --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                       │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ ssh     │ addons-235235 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'                                                                                                                                                                                                                                                                                                                                                                                                   │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ ip      │ addons-235235 ip                                                                                                                                                                                                                                                                                                                                                                                                                                                           │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable ingress-dns --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                            │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable ingress --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                                │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-235235                                                                                                                                                                                                                                                                                                                                                                                             │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable registry-creds --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                         │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable nvidia-device-plugin --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                   │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
	│ addons  │ addons-235235 addons disable yakd --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                                   │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:26 UTC │
	│ addons  │ addons-235235 addons disable volumesnapshots --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                        │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
	│ addons  │ addons-235235 addons disable csi-hostpath-driver --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                    │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
	│ addons  │ addons-235235 addons disable cloud-spanner --alsologtostderr -v=1                                                                                                                                                                                                                                                                                                                                                                                                          │ addons-235235          │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
	└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:21:21
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:21:21.575028  579049 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:21:21.575207  579049 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:21.575232  579049 out.go:374] Setting ErrFile to fd 2...
	I0917 00:21:21.575254  579049 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:21.575549  579049 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:21:21.576022  579049 out.go:368] Setting JSON to false
	I0917 00:21:21.576893  579049 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11027,"bootTime":1758057455,"procs":148,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:21:21.576984  579049 start.go:140] virtualization:  
	I0917 00:21:21.580251  579049 out.go:179] * [addons-235235] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:21:21.583995  579049 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:21:21.584058  579049 notify.go:220] Checking for updates...
	I0917 00:21:21.589730  579049 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:21:21.592567  579049 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:21:21.595509  579049 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:21:21.598356  579049 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:21:21.601213  579049 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:21:21.604306  579049 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:21:21.625256  579049 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:21:21.625390  579049 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:21.685346  579049 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:47 SystemTime:2025-09-17 00:21:21.676486714 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:21.685483  579049 docker.go:318] overlay module found
	I0917 00:21:21.688541  579049 out.go:179] * Using the docker driver based on user configuration
	I0917 00:21:21.691206  579049 start.go:304] selected driver: docker
	I0917 00:21:21.691230  579049 start.go:918] validating driver "docker" against <nil>
	I0917 00:21:21.691252  579049 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:21:21.691974  579049 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:21.747252  579049 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:47 SystemTime:2025-09-17 00:21:21.737982702 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:21.747418  579049 start_flags.go:327] no existing cluster config was found, will generate one from the flags 
	I0917 00:21:21.747655  579049 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:21:21.750585  579049 out.go:179] * Using Docker driver with root privileges
	I0917 00:21:21.753414  579049 cni.go:84] Creating CNI manager for ""
	I0917 00:21:21.753496  579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:21:21.753509  579049 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0917 00:21:21.753597  579049 start.go:348] cluster config:
	{Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:21:21.756589  579049 out.go:179] * Starting "addons-235235" primary control-plane node in "addons-235235" cluster
	I0917 00:21:21.759495  579049 cache.go:123] Beginning downloading kic base image for docker with docker
	I0917 00:21:21.762345  579049 out.go:179] * Pulling base image v0.0.48 ...
	I0917 00:21:21.765171  579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:21:21.765228  579049 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
	I0917 00:21:21.765240  579049 cache.go:58] Caching tarball of preloaded images
	I0917 00:21:21.765246  579049 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
	I0917 00:21:21.765334  579049 preload.go:172] Found /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0917 00:21:21.765346  579049 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
	I0917 00:21:21.765742  579049 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json ...
	I0917 00:21:21.765772  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json: {Name:mka6cd9dd804af0eb234f92dd0d9458ad6607892 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:21.780916  579049 cache.go:152] Downloading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 to local cache
	I0917 00:21:21.781018  579049 image.go:65] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory
	I0917 00:21:21.781042  579049 image.go:68] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory, skipping pull
	I0917 00:21:21.781050  579049 image.go:137] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in cache, skipping pull
	I0917 00:21:21.781057  579049 cache.go:155] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 as a tarball
	I0917 00:21:21.781063  579049 cache.go:165] Loading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 from local cache
	I0917 00:21:39.237948  579049 cache.go:167] successfully loaded and using gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 from cached tarball
	I0917 00:21:39.237990  579049 cache.go:232] Successfully downloaded all kic artifacts
	I0917 00:21:39.238038  579049 start.go:360] acquireMachinesLock for addons-235235: {Name:mk420ada22966e913cee54d953c4c96eb7228735 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0917 00:21:39.238736  579049 start.go:364] duration metric: took 670.901µs to acquireMachinesLock for "addons-235235"
	I0917 00:21:39.238775  579049 start.go:93] Provisioning new machine with config: &{Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0917 00:21:39.238850  579049 start.go:125] createHost starting for "" (driver="docker")
	I0917 00:21:39.242186  579049 out.go:252] * Creating docker container (CPUs=2, Memory=4096MB) ...
	I0917 00:21:39.242413  579049 start.go:159] libmachine.API.Create for "addons-235235" (driver="docker")
	I0917 00:21:39.242449  579049 client.go:168] LocalClient.Create starting
	I0917 00:21:39.242573  579049 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem
	I0917 00:21:39.701609  579049 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem
	I0917 00:21:40.116625  579049 cli_runner.go:164] Run: docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0917 00:21:40.132770  579049 cli_runner.go:211] docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0917 00:21:40.132854  579049 network_create.go:284] running [docker network inspect addons-235235] to gather additional debugging logs...
	I0917 00:21:40.132893  579049 cli_runner.go:164] Run: docker network inspect addons-235235
	W0917 00:21:40.148702  579049 cli_runner.go:211] docker network inspect addons-235235 returned with exit code 1
	I0917 00:21:40.148750  579049 network_create.go:287] error running [docker network inspect addons-235235]: docker network inspect addons-235235: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network addons-235235 not found
	I0917 00:21:40.148764  579049 network_create.go:289] output of [docker network inspect addons-235235]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network addons-235235 not found
	
	** /stderr **
	I0917 00:21:40.148907  579049 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0917 00:21:40.165466  579049 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40017f8e00}
	I0917 00:21:40.165507  579049 network_create.go:124] attempt to create docker network addons-235235 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
	I0917 00:21:40.165567  579049 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-235235 addons-235235
	I0917 00:21:40.224981  579049 network_create.go:108] docker network addons-235235 192.168.49.0/24 created
	I0917 00:21:40.225012  579049 kic.go:121] calculated static IP "192.168.49.2" for the "addons-235235" container
	I0917 00:21:40.225096  579049 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0917 00:21:40.240204  579049 cli_runner.go:164] Run: docker volume create addons-235235 --label name.minikube.sigs.k8s.io=addons-235235 --label created_by.minikube.sigs.k8s.io=true
	I0917 00:21:40.257434  579049 oci.go:103] Successfully created a docker volume addons-235235
	I0917 00:21:40.257520  579049 cli_runner.go:164] Run: docker run --rm --name addons-235235-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --entrypoint /usr/bin/test -v addons-235235:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
	I0917 00:21:42.332421  579049 cli_runner.go:217] Completed: docker run --rm --name addons-235235-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --entrypoint /usr/bin/test -v addons-235235:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib: (2.074852116s)
	I0917 00:21:42.332482  579049 oci.go:107] Successfully prepared a docker volume addons-235235
	I0917 00:21:42.332519  579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:21:42.332540  579049 kic.go:194] Starting extracting preloaded images to volume ...
	I0917 00:21:42.332620  579049 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-235235:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
	I0917 00:21:46.003007  579049 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-235235:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.670344697s)
	I0917 00:21:46.003047  579049 kic.go:203] duration metric: took 3.670502674s to extract preloaded images to volume ...
	W0917 00:21:46.003215  579049 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0917 00:21:46.003342  579049 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0917 00:21:46.065517  579049 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-235235 --name addons-235235 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-235235 --network addons-235235 --ip 192.168.49.2 --volume addons-235235:/var --security-opt apparmor=unconfined --memory=4096mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
	I0917 00:21:46.363848  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Running}}
	I0917 00:21:46.384702  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:21:46.403801  579049 cli_runner.go:164] Run: docker exec addons-235235 stat /var/lib/dpkg/alternatives/iptables
	I0917 00:21:46.455139  579049 oci.go:144] the created container "addons-235235" has a running status.
	I0917 00:21:46.455168  579049 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa...
	I0917 00:21:46.510166  579049 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0917 00:21:46.530338  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:21:46.549851  579049 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0917 00:21:46.549871  579049 kic_runner.go:114] Args: [docker exec --privileged addons-235235 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0917 00:21:46.600724  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:21:46.626397  579049 machine.go:93] provisionDockerMachine start ...
	I0917 00:21:46.626500  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:46.647098  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:46.647544  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:46.647565  579049 main.go:141] libmachine: About to run SSH command:
	hostname
	I0917 00:21:46.648327  579049 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
	I0917 00:21:49.787981  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-235235
	
	I0917 00:21:49.788006  579049 ubuntu.go:182] provisioning hostname "addons-235235"
	I0917 00:21:49.788066  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:49.806736  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:49.807057  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:49.807073  579049 main.go:141] libmachine: About to run SSH command:
	sudo hostname addons-235235 && echo "addons-235235" | sudo tee /etc/hostname
	I0917 00:21:49.959594  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-235235
	
	I0917 00:21:49.959672  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:49.976980  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:49.977314  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:49.977346  579049 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\saddons-235235' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-235235/g' /etc/hosts;
				else 
					echo '127.0.1.1 addons-235235' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0917 00:21:50.117147  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0917 00:21:50.117174  579049 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-576428/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-576428/.minikube}
	I0917 00:21:50.117193  579049 ubuntu.go:190] setting up certificates
	I0917 00:21:50.117204  579049 provision.go:84] configureAuth start
	I0917 00:21:50.117265  579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
	I0917 00:21:50.135563  579049 provision.go:143] copyHostCerts
	I0917 00:21:50.135658  579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem (1082 bytes)
	I0917 00:21:50.135794  579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem (1123 bytes)
	I0917 00:21:50.135873  579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem (1675 bytes)
	I0917 00:21:50.135938  579049 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem org=jenkins.addons-235235 san=[127.0.0.1 192.168.49.2 addons-235235 localhost minikube]
	I0917 00:21:50.541509  579049 provision.go:177] copyRemoteCerts
	I0917 00:21:50.541577  579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0917 00:21:50.541623  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:50.558273  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:21:50.657010  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0917 00:21:50.681604  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
	I0917 00:21:50.705284  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0917 00:21:50.728479  579049 provision.go:87] duration metric: took 611.21857ms to configureAuth
	I0917 00:21:50.728507  579049 ubuntu.go:206] setting minikube options for container-runtime
	I0917 00:21:50.728697  579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:21:50.728759  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:50.745617  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:50.745937  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:50.745953  579049 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0917 00:21:50.884904  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0917 00:21:50.884930  579049 ubuntu.go:71] root file system type: overlay
	I0917 00:21:50.885055  579049 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0917 00:21:50.885124  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:50.901703  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:50.902000  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:50.902092  579049 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0917 00:21:51.051889  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I0917 00:21:51.051978  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:51.070361  579049 main.go:141] libmachine: Using SSH client type: native
	I0917 00:21:51.070688  579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33505 <nil> <nil>}
	I0917 00:21:51.070712  579049 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0917 00:21:51.887936  579049 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2025-09-03 20:57:01.000000000 +0000
	+++ /lib/systemd/system/docker.service.new	2025-09-17 00:21:51.044692854 +0000
	@@ -9,23 +9,34 @@
	 
	 [Service]
	 Type=notify
	-# the default is not to use systemd for cgroups because the delegate issues still
	-# exists and systemd currently does not support the cgroup feature set required
	-# for containers run by docker
	-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
	-ExecReload=/bin/kill -s HUP $MAINPID
	-TimeoutStartSec=0
	-RestartSec=2
	 Restart=always
	 
	+
	+
	+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	+# The base configuration already specifies an 'ExecStart=...' command. The first directive
	+# here is to clear out that command inherited from the base configuration. Without this,
	+# the command from the base configuration and the command specified here are treated as
	+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	+# will catch this invalid input and refuse to start the service with an error like:
	+#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	+
	+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	+ExecStart=
	+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	+ExecReload=/bin/kill -s HUP $MAINPID
	+
	 # Having non-zero Limit*s causes performance problems due to accounting overhead
	 # in the kernel. We recommend using cgroups to do container-local accounting.
	+LimitNOFILE=infinity
	 LimitNPROC=infinity
	 LimitCORE=infinity
	 
	-# Comment TasksMax if your systemd version does not support it.
	-# Only systemd 226 and above support this option.
	+# Uncomment TasksMax if your systemd version supports it.
	+# Only systemd 226 and above support this version.
	 TasksMax=infinity
	+TimeoutStartSec=0
	 
	 # set delegate yes so that systemd does not reset the cgroups of docker containers
	 Delegate=yes
	Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
	Executing: /lib/systemd/systemd-sysv-install enable docker
	
	I0917 00:21:51.887961  579049 machine.go:96] duration metric: took 5.261543896s to provisionDockerMachine
	I0917 00:21:51.887974  579049 client.go:171] duration metric: took 12.645517134s to LocalClient.Create
	I0917 00:21:51.887992  579049 start.go:167] duration metric: took 12.645579343s to libmachine.API.Create "addons-235235"
	I0917 00:21:51.888003  579049 start.go:293] postStartSetup for "addons-235235" (driver="docker")
	I0917 00:21:51.888013  579049 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0917 00:21:51.888093  579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0917 00:21:51.888152  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:51.905418  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:21:52.018304  579049 ssh_runner.go:195] Run: cat /etc/os-release
	I0917 00:21:52.021860  579049 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0917 00:21:52.021893  579049 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0917 00:21:52.021904  579049 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0917 00:21:52.021911  579049 info.go:137] Remote host: Ubuntu 22.04.5 LTS
	I0917 00:21:52.021922  579049 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/addons for local assets ...
	I0917 00:21:52.021997  579049 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/files for local assets ...
	I0917 00:21:52.022025  579049 start.go:296] duration metric: took 134.016299ms for postStartSetup
	I0917 00:21:52.022352  579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
	I0917 00:21:52.041209  579049 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json ...
	I0917 00:21:52.041537  579049 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:21:52.041600  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:52.062226  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:21:52.165262  579049 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0917 00:21:52.169996  579049 start.go:128] duration metric: took 12.931126842s to createHost
	I0917 00:21:52.170022  579049 start.go:83] releasing machines lock for "addons-235235", held for 12.931267713s
	I0917 00:21:52.170096  579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
	I0917 00:21:52.186894  579049 ssh_runner.go:195] Run: cat /version.json
	I0917 00:21:52.186955  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:52.187205  579049 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0917 00:21:52.187269  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:21:52.204716  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:21:52.223706  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:21:52.300108  579049 ssh_runner.go:195] Run: systemctl --version
	I0917 00:21:52.429222  579049 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0917 00:21:52.433763  579049 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0917 00:21:52.458377  579049 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0917 00:21:52.458464  579049 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0917 00:21:52.489630  579049 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0917 00:21:52.489700  579049 start.go:495] detecting cgroup driver to use...
	I0917 00:21:52.489742  579049 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:21:52.489851  579049 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:21:52.505791  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I0917 00:21:52.517581  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0917 00:21:52.528064  579049 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0917 00:21:52.528150  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0917 00:21:52.539055  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:21:52.548794  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0917 00:21:52.558539  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:21:52.569361  579049 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0917 00:21:52.578347  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0917 00:21:52.587980  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0917 00:21:52.597499  579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0917 00:21:52.607117  579049 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0917 00:21:52.615625  579049 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0917 00:21:52.624283  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:21:52.715592  579049 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I0917 00:21:52.798437  579049 start.go:495] detecting cgroup driver to use...
	I0917 00:21:52.798486  579049 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:21:52.798536  579049 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0917 00:21:52.811932  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:21:52.824748  579049 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0917 00:21:52.841549  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:21:52.853296  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0917 00:21:52.864655  579049 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:21:52.880551  579049 ssh_runner.go:195] Run: which cri-dockerd
	I0917 00:21:52.883971  579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0917 00:21:52.892513  579049 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I0917 00:21:52.910305  579049 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0917 00:21:53.006270  579049 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0917 00:21:53.106321  579049 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I0917 00:21:53.106467  579049 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0917 00:21:53.124526  579049 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I0917 00:21:53.135633  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:21:53.222370  579049 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0917 00:21:53.592077  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0917 00:21:53.603412  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0917 00:21:53.615682  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:21:53.627819  579049 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0917 00:21:53.716473  579049 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0917 00:21:53.805388  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:21:53.891592  579049 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0917 00:21:53.904811  579049 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I0917 00:21:53.915815  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:21:54.000668  579049 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I0917 00:21:54.067673  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:21:54.081897  579049 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0917 00:21:54.082049  579049 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0917 00:21:54.085819  579049 start.go:563] Will wait 60s for crictl version
	I0917 00:21:54.085910  579049 ssh_runner.go:195] Run: which crictl
	I0917 00:21:54.089251  579049 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0917 00:21:54.126515  579049 start.go:579] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.4.0
	RuntimeApiVersion:  v1
	I0917 00:21:54.126630  579049 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:21:54.147693  579049 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:21:54.181014  579049 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
	I0917 00:21:54.181127  579049 cli_runner.go:164] Run: docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0917 00:21:54.196147  579049 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0917 00:21:54.199689  579049 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0917 00:21:54.210089  579049 kubeadm.go:875] updating cluster {Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0917 00:21:54.210217  579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:21:54.210286  579049 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:21:54.227698  579049 docker.go:691] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I0917 00:21:54.227719  579049 docker.go:621] Images already preloaded, skipping extraction
	I0917 00:21:54.227842  579049 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:21:54.246045  579049 docker.go:691] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I0917 00:21:54.246070  579049 cache_images.go:85] Images are preloaded, skipping loading
	I0917 00:21:54.246084  579049 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 docker true true} ...
	I0917 00:21:54.246180  579049 kubeadm.go:938] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-235235 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0917 00:21:54.246249  579049 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0917 00:21:54.293114  579049 cni.go:84] Creating CNI manager for ""
	I0917 00:21:54.293139  579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:21:54.293150  579049 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0917 00:21:54.293173  579049 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-235235 NodeName:addons-235235 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0917 00:21:54.293297  579049 kubeadm.go:195] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "addons-235235"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.49.2"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0917 00:21:54.293364  579049 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
	I0917 00:21:54.302150  579049 binaries.go:44] Found k8s binaries, skipping transfer
	I0917 00:21:54.302218  579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0917 00:21:54.310488  579049 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
	I0917 00:21:54.327652  579049 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0917 00:21:54.345355  579049 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2214 bytes)
	I0917 00:21:54.362817  579049 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0917 00:21:54.366255  579049 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0917 00:21:54.376487  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:21:54.462688  579049 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:21:54.478534  579049 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235 for IP: 192.168.49.2
	I0917 00:21:54.478555  579049 certs.go:194] generating shared ca certs ...
	I0917 00:21:54.478571  579049 certs.go:226] acquiring lock for ca certs: {Name:mk04b183dabeee5957951eb115c646a018da171d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:54.479358  579049 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key
	I0917 00:21:54.996977  579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt ...
	I0917 00:21:54.997009  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt: {Name:mkf0a697fcbe2a0d7404f79998ce7d05a56f9b21 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:54.997771  579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key ...
	I0917 00:21:54.997789  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key: {Name:mk1affc9bfbafcb9724a2477ed588b90f352fe2b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:54.997896  579049 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key
	I0917 00:21:55.508397  579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt ...
	I0917 00:21:55.508430  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt: {Name:mk63f938da2b7f092b546377ccdb97820e1deb65 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:55.509217  579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key ...
	I0917 00:21:55.509235  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key: {Name:mk0b72927909072f74409784a63f5cf1a0f52efa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:55.510579  579049 certs.go:256] generating profile certs ...
	I0917 00:21:55.510647  579049 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key
	I0917 00:21:55.510660  579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt with IP's: []
	I0917 00:21:56.217917  579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt ...
	I0917 00:21:56.217949  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: {Name:mk4b75e1b38631d642201a7e78d079bdbcb807a0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:56.218141  579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key ...
	I0917 00:21:56.218154  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key: {Name:mk43515ac5339f5f12428b15b1543e198224bd20 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:56.218240  579049 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096
	I0917 00:21:56.218261  579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
	I0917 00:21:57.306815  579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 ...
	I0917 00:21:57.306850  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096: {Name:mk2d071d9f4b6dc2f36419dd2e49c9cd929adc28 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:57.307044  579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096 ...
	I0917 00:21:57.307058  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096: {Name:mk0977e0aa9abadab25a197623ceabb9d71ac336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:57.308130  579049 certs.go:381] copying /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 -> /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt
	I0917 00:21:57.308231  579049 certs.go:385] copying /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096 -> /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key
	I0917 00:21:57.308292  579049 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key
	I0917 00:21:57.308321  579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt with IP's: []
	I0917 00:21:57.456147  579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt ...
	I0917 00:21:57.456177  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt: {Name:mk2ab853f095cd406bcc75fee4a7d90cf2717a27 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:57.456925  579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key ...
	I0917 00:21:57.456943  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key: {Name:mk0068b799899ac8e9a1b70775fefb46581bed03 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:21:57.457587  579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem (1671 bytes)
	I0917 00:21:57.457629  579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem (1082 bytes)
	I0917 00:21:57.457664  579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem (1123 bytes)
	I0917 00:21:57.457699  579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem (1675 bytes)
	I0917 00:21:57.458310  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0917 00:21:57.482173  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0917 00:21:57.509090  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0917 00:21:57.535147  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I0917 00:21:57.564117  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
	I0917 00:21:57.587464  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I0917 00:21:57.611051  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0917 00:21:57.634617  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I0917 00:21:57.657866  579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0917 00:21:57.681005  579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0917 00:21:57.699236  579049 ssh_runner.go:195] Run: openssl version
	I0917 00:21:57.704793  579049 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0917 00:21:57.714086  579049 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:21:57.717662  579049 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 17 00:21 /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:21:57.717720  579049 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:21:57.724265  579049 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0917 00:21:57.733683  579049 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0917 00:21:57.736972  579049 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I0917 00:21:57.737019  579049 kubeadm.go:392] StartCluster: {Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:21:57.737137  579049 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0917 00:21:57.753540  579049 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0917 00:21:57.762649  579049 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0917 00:21:57.771322  579049 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
	I0917 00:21:57.771406  579049 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0917 00:21:57.779897  579049 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0917 00:21:57.779915  579049 kubeadm.go:157] found existing configuration files:
	
	I0917 00:21:57.779963  579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I0917 00:21:57.788488  579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I0917 00:21:57.788580  579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I0917 00:21:57.797066  579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I0917 00:21:57.805092  579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I0917 00:21:57.805181  579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0917 00:21:57.813476  579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I0917 00:21:57.822730  579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I0917 00:21:57.822825  579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0917 00:21:57.831201  579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I0917 00:21:57.839955  579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I0917 00:21:57.840048  579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0917 00:21:57.848234  579049 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0917 00:21:57.889439  579049 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
	I0917 00:21:57.889533  579049 kubeadm.go:310] [preflight] Running pre-flight checks
	I0917 00:21:57.909841  579049 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
	I0917 00:21:57.909941  579049 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1084-aws
	I0917 00:21:57.909991  579049 kubeadm.go:310] OS: Linux
	I0917 00:21:57.910097  579049 kubeadm.go:310] CGROUPS_CPU: enabled
	I0917 00:21:57.910180  579049 kubeadm.go:310] CGROUPS_CPUACCT: enabled
	I0917 00:21:57.910262  579049 kubeadm.go:310] CGROUPS_CPUSET: enabled
	I0917 00:21:57.910368  579049 kubeadm.go:310] CGROUPS_DEVICES: enabled
	I0917 00:21:57.910448  579049 kubeadm.go:310] CGROUPS_FREEZER: enabled
	I0917 00:21:57.910522  579049 kubeadm.go:310] CGROUPS_MEMORY: enabled
	I0917 00:21:57.910620  579049 kubeadm.go:310] CGROUPS_PIDS: enabled
	I0917 00:21:57.910695  579049 kubeadm.go:310] CGROUPS_HUGETLB: enabled
	I0917 00:21:57.910769  579049 kubeadm.go:310] CGROUPS_BLKIO: enabled
	I0917 00:21:57.974704  579049 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0917 00:21:57.974855  579049 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0917 00:21:57.974967  579049 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
	I0917 00:21:57.992912  579049 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0917 00:21:57.999386  579049 out.go:252]   - Generating certificates and keys ...
	I0917 00:21:57.999562  579049 kubeadm.go:310] [certs] Using existing ca certificate authority
	I0917 00:21:57.999687  579049 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
	I0917 00:21:58.805184  579049 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0917 00:21:58.947726  579049 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
	I0917 00:21:59.277680  579049 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
	I0917 00:21:59.723727  579049 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
	I0917 00:21:59.795659  579049 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
	I0917 00:21:59.796771  579049 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-235235 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0917 00:22:00.174798  579049 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
	I0917 00:22:00.179633  579049 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-235235 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
	I0917 00:22:00.645369  579049 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0917 00:22:00.971446  579049 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
	I0917 00:22:01.466751  579049 kubeadm.go:310] [certs] Generating "sa" key and public key
	I0917 00:22:01.467061  579049 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0917 00:22:01.991011  579049 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0917 00:22:02.830639  579049 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I0917 00:22:03.820572  579049 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0917 00:22:04.480770  579049 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0917 00:22:04.729327  579049 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0917 00:22:04.730011  579049 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0917 00:22:04.732630  579049 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0917 00:22:04.736203  579049 out.go:252]   - Booting up control plane ...
	I0917 00:22:04.736314  579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0917 00:22:04.736404  579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0917 00:22:04.736498  579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0917 00:22:04.746717  579049 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0917 00:22:04.746839  579049 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
	I0917 00:22:04.752934  579049 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
	I0917 00:22:04.753312  579049 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0917 00:22:04.753371  579049 kubeadm.go:310] [kubelet-start] Starting the kubelet
	I0917 00:22:04.860970  579049 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I0917 00:22:04.861096  579049 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
	I0917 00:22:06.360579  579049 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.501263247s
	I0917 00:22:06.364324  579049 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
	I0917 00:22:06.364453  579049 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
	I0917 00:22:06.364562  579049 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
	I0917 00:22:06.364651  579049 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
	I0917 00:22:09.897887  579049 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 3.532628125s
	I0917 00:22:11.771445  579049 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 5.407083046s
	I0917 00:22:12.865779  579049 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 6.501347269s
	I0917 00:22:12.885266  579049 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0917 00:22:12.898862  579049 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0917 00:22:12.913392  579049 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
	I0917 00:22:12.913662  579049 kubeadm.go:310] [mark-control-plane] Marking the node addons-235235 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0917 00:22:12.925485  579049 kubeadm.go:310] [bootstrap-token] Using token: cvmje2.b2v7plsvdopt3p6d
	I0917 00:22:12.928401  579049 out.go:252]   - Configuring RBAC rules ...
	I0917 00:22:12.928561  579049 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0917 00:22:12.932858  579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0917 00:22:12.940381  579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0917 00:22:12.946541  579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0917 00:22:12.950757  579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0917 00:22:12.954681  579049 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0917 00:22:13.273886  579049 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0917 00:22:13.709285  579049 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
	I0917 00:22:14.273326  579049 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
	I0917 00:22:14.274605  579049 kubeadm.go:310] 
	I0917 00:22:14.274689  579049 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
	I0917 00:22:14.274695  579049 kubeadm.go:310] 
	I0917 00:22:14.274776  579049 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
	I0917 00:22:14.274781  579049 kubeadm.go:310] 
	I0917 00:22:14.274807  579049 kubeadm.go:310]   mkdir -p $HOME/.kube
	I0917 00:22:14.274869  579049 kubeadm.go:310]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0917 00:22:14.274922  579049 kubeadm.go:310]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0917 00:22:14.274926  579049 kubeadm.go:310] 
	I0917 00:22:14.274983  579049 kubeadm.go:310] Alternatively, if you are the root user, you can run:
	I0917 00:22:14.274987  579049 kubeadm.go:310] 
	I0917 00:22:14.275038  579049 kubeadm.go:310]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0917 00:22:14.275042  579049 kubeadm.go:310] 
	I0917 00:22:14.275096  579049 kubeadm.go:310] You should now deploy a pod network to the cluster.
	I0917 00:22:14.275188  579049 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0917 00:22:14.275260  579049 kubeadm.go:310]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0917 00:22:14.275264  579049 kubeadm.go:310] 
	I0917 00:22:14.275352  579049 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
	I0917 00:22:14.275432  579049 kubeadm.go:310] and service account keys on each node and then running the following as root:
	I0917 00:22:14.275436  579049 kubeadm.go:310] 
	I0917 00:22:14.275524  579049 kubeadm.go:310]   kubeadm join control-plane.minikube.internal:8443 --token cvmje2.b2v7plsvdopt3p6d \
	I0917 00:22:14.275631  579049 kubeadm.go:310] 	--discovery-token-ca-cert-hash sha256:35b41cd3859f8de52bd979a1594263a0a6bc8247f58714753744f2ce2587da45 \
	I0917 00:22:14.275652  579049 kubeadm.go:310] 	--control-plane 
	I0917 00:22:14.275656  579049 kubeadm.go:310] 
	I0917 00:22:14.275745  579049 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
	I0917 00:22:14.275749  579049 kubeadm.go:310] 
	I0917 00:22:14.275835  579049 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token cvmje2.b2v7plsvdopt3p6d \
	I0917 00:22:14.275941  579049 kubeadm.go:310] 	--discovery-token-ca-cert-hash sha256:35b41cd3859f8de52bd979a1594263a0a6bc8247f58714753744f2ce2587da45 
	I0917 00:22:14.279625  579049 kubeadm.go:310] 	[WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
	I0917 00:22:14.279883  579049 kubeadm.go:310] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
	I0917 00:22:14.280015  579049 kubeadm.go:310] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I0917 00:22:14.280030  579049 cni.go:84] Creating CNI manager for ""
	I0917 00:22:14.280044  579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:22:14.283191  579049 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I0917 00:22:14.285942  579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0917 00:22:14.294820  579049 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I0917 00:22:14.313038  579049 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0917 00:22:14.313179  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:14.313266  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-235235 minikube.k8s.io/updated_at=2025_09_17T00_22_14_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=addons-235235 minikube.k8s.io/primary=true
	I0917 00:22:14.437821  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:14.437912  579049 ops.go:34] apiserver oom_adj: -16
	I0917 00:22:14.937948  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:15.438802  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:15.938472  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:16.438274  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:16.937920  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:17.438806  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:17.938258  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:18.438033  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:18.938219  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:19.437918  579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0917 00:22:19.527165  579049 kubeadm.go:1105] duration metric: took 5.214030381s to wait for elevateKubeSystemPrivileges
	I0917 00:22:19.527199  579049 kubeadm.go:394] duration metric: took 21.790182686s to StartCluster
	I0917 00:22:19.527217  579049 settings.go:142] acquiring lock: {Name:mkeeff7458e530a541c151580b54d47f2e77f0de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:22:19.527863  579049 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:22:19.528270  579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/kubeconfig: {Name:mk3b9e4b05730cfa71613487e1675bc90b668ce8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:22:19.528891  579049 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0917 00:22:19.528923  579049 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0917 00:22:19.529159  579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:22:19.529188  579049 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:true auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:true storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
	I0917 00:22:19.529258  579049 addons.go:69] Setting yakd=true in profile "addons-235235"
	I0917 00:22:19.529272  579049 addons.go:238] Setting addon yakd=true in "addons-235235"
	I0917 00:22:19.529292  579049 addons.go:69] Setting inspektor-gadget=true in profile "addons-235235"
	I0917 00:22:19.529309  579049 addons.go:238] Setting addon inspektor-gadget=true in "addons-235235"
	I0917 00:22:19.529314  579049 addons.go:69] Setting metrics-server=true in profile "addons-235235"
	I0917 00:22:19.529329  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.529335  579049 addons.go:238] Setting addon metrics-server=true in "addons-235235"
	I0917 00:22:19.529351  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.529820  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.529926  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.530167  579049 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-235235"
	I0917 00:22:19.530193  579049 addons.go:238] Setting addon nvidia-device-plugin=true in "addons-235235"
	I0917 00:22:19.530214  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.530608  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.533005  579049 addons.go:69] Setting registry=true in profile "addons-235235"
	I0917 00:22:19.533064  579049 addons.go:238] Setting addon registry=true in "addons-235235"
	I0917 00:22:19.533105  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.533598  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.533721  579049 addons.go:69] Setting registry-creds=true in profile "addons-235235"
	I0917 00:22:19.553513  579049 addons.go:238] Setting addon registry-creds=true in "addons-235235"
	I0917 00:22:19.553554  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.554001  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.533731  579049 addons.go:69] Setting storage-provisioner=true in profile "addons-235235"
	I0917 00:22:19.554558  579049 addons.go:238] Setting addon storage-provisioner=true in "addons-235235"
	I0917 00:22:19.554586  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.555019  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.533736  579049 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-235235"
	I0917 00:22:19.572135  579049 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-235235"
	I0917 00:22:19.533739  579049 addons.go:69] Setting volcano=true in profile "addons-235235"
	I0917 00:22:19.576505  579049 addons.go:238] Setting addon volcano=true in "addons-235235"
	I0917 00:22:19.533742  579049 addons.go:69] Setting volumesnapshots=true in profile "addons-235235"
	I0917 00:22:19.533785  579049 out.go:179] * Verifying Kubernetes components...
	I0917 00:22:19.533978  579049 addons.go:69] Setting default-storageclass=true in profile "addons-235235"
	I0917 00:22:19.533987  579049 addons.go:69] Setting amd-gpu-device-plugin=true in profile "addons-235235"
	I0917 00:22:19.534002  579049 addons.go:69] Setting cloud-spanner=true in profile "addons-235235"
	I0917 00:22:19.534006  579049 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-235235"
	I0917 00:22:19.534010  579049 addons.go:69] Setting ingress=true in profile "addons-235235"
	I0917 00:22:19.534017  579049 addons.go:69] Setting gcp-auth=true in profile "addons-235235"
	I0917 00:22:19.534022  579049 addons.go:69] Setting ingress-dns=true in profile "addons-235235"
	I0917 00:22:19.529295  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.577395  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.577415  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.577420  579049 addons.go:238] Setting addon volumesnapshots=true in "addons-235235"
	I0917 00:22:19.578621  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.589790  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.591337  579049 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-235235"
	I0917 00:22:19.591681  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.605010  579049 addons.go:238] Setting addon amd-gpu-device-plugin=true in "addons-235235"
	I0917 00:22:19.605685  579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:22:19.605936  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.606433  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.610273  579049 mustload.go:65] Loading cluster: addons-235235
	I0917 00:22:19.610605  579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:22:19.610965  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.622466  579049 addons.go:238] Setting addon ingress-dns=true in "addons-235235"
	I0917 00:22:19.622523  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.627116  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.663876  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.678927  579049 addons.go:238] Setting addon cloud-spanner=true in "addons-235235"
	I0917 00:22:19.678989  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.679545  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.693752  579049 addons.go:238] Setting addon csi-hostpath-driver=true in "addons-235235"
	I0917 00:22:19.693802  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.694361  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.717165  579049 out.go:179]   - Using image nvcr.io/nvidia/k8s-device-plugin:v0.17.3
	I0917 00:22:19.717821  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.723675  579049 addons.go:435] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I0917 00:22:19.723700  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
	I0917 00:22:19.723768  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.748756  579049 addons.go:238] Setting addon ingress=true in "addons-235235"
	I0917 00:22:19.748839  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.749572  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.756519  579049 out.go:179]   - Using image registry.k8s.io/metrics-server/metrics-server:v0.8.0
	I0917 00:22:19.763197  579049 addons.go:435] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I0917 00:22:19.763223  579049 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I0917 00:22:19.763306  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.812721  579049 out.go:179]   - Using image docker.io/upmcenterprises/registry-creds:1.10
	I0917 00:22:19.815731  579049 addons.go:435] installing /etc/kubernetes/addons/registry-creds-rc.yaml
	I0917 00:22:19.815756  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-creds-rc.yaml (3306 bytes)
	I0917 00:22:19.815835  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.838323  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.845511  579049 addons.go:238] Setting addon default-storageclass=true in "addons-235235"
	I0917 00:22:19.845553  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:19.846041  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:19.860247  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
	I0917 00:22:19.866691  579049 out.go:179]   - Using image docker.io/rocm/k8s-device-plugin:1.25.2.8
	I0917 00:22:19.868578  579049 out.go:179]   - Using image docker.io/kicbase/minikube-ingress-dns:0.0.4
	I0917 00:22:19.868786  579049 out.go:179]   - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.44.1
	I0917 00:22:19.873647  579049 out.go:179]   - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.41
	I0917 00:22:19.868799  579049 out.go:179]   - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.9
	I0917 00:22:19.869804  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
	I0917 00:22:19.869810  579049 out.go:179]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0917 00:22:19.898606  579049 addons.go:435] installing /etc/kubernetes/addons/deployment.yaml
	I0917 00:22:19.898675  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
	I0917 00:22:19.898773  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.898982  579049 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:22:19.899016  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0917 00:22:19.899071  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.919903  579049 addons.go:435] installing /etc/kubernetes/addons/ig-crd.yaml
	I0917 00:22:19.919926  579049 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (14 bytes)
	I0917 00:22:19.919997  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.922402  579049 out.go:179]   - Using image docker.io/marcnuri/yakd:0.0.5
	I0917 00:22:19.925824  579049 out.go:179]   - Using image docker.io/registry:3.0.0
	I0917 00:22:19.928851  579049 addons.go:435] installing /etc/kubernetes/addons/registry-rc.yaml
	I0917 00:22:19.928874  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
	I0917 00:22:19.928945  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.929156  579049 addons.go:435] installing /etc/kubernetes/addons/yakd-ns.yaml
	I0917 00:22:19.929181  579049 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
	I0917 00:22:19.929248  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.944797  579049 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
	I0917 00:22:19.944885  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.945181  579049 addons.go:435] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0917 00:22:19.945220  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2889 bytes)
	I0917 00:22:19.945288  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.969993  579049 addons.go:435] installing /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
	I0917 00:22:19.970015  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/amd-gpu-device-plugin.yaml (1868 bytes)
	I0917 00:22:19.970084  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:19.987595  579049 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
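The bash pipeline above injects the host.minikube.internal record into CoreDNS: it dumps the coredns ConfigMap, uses sed to add a log directive ahead of the existing errors line and a hosts block ahead of the existing forward directive, then pushes the edited Corefile back with kubectl replace. The inserted hosts block, taken verbatim from the sed expression, is:

        hosts {
           192.168.49.1 host.minikube.internal
           fallthrough
        }

fallthrough keeps every name other than host.minikube.internal flowing on to the forward plugin, so only the extra host record changes resolution behaviour.
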
	I0917 00:22:19.998391  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.010306  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.037808  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.046051  579049 out.go:179]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
	I0917 00:22:20.049476  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
	I0917 00:22:20.056378  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
	I0917 00:22:20.058379  579049 addons.go:238] Setting addon storage-provisioner-rancher=true in "addons-235235"
	I0917 00:22:20.058469  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:20.058947  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:20.067368  579049 out.go:179]   - Using image docker.io/volcanosh/vc-scheduler:v1.12.2
	I0917 00:22:20.067553  579049 out.go:179]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
	I0917 00:22:20.068597  579049 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
	I0917 00:22:20.068613  579049 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0917 00:22:20.068675  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:20.077879  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
	I0917 00:22:20.078875  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.084720  579049 out.go:179]   - Using image registry.k8s.io/ingress-nginx/controller:v1.13.2
	I0917 00:22:20.087502  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
	I0917 00:22:20.093179  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
	I0917 00:22:20.093467  579049 addons.go:435] installing /etc/kubernetes/addons/ingress-deploy.yaml
	I0917 00:22:20.093503  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
	I0917 00:22:20.093618  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:20.093889  579049 out.go:179]   - Using image docker.io/volcanosh/vc-webhook-manager:v1.12.2
	I0917 00:22:20.094721  579049 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:22:20.102090  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
	I0917 00:22:20.102260  579049 out.go:179]   - Using image docker.io/volcanosh/vc-controller-manager:v1.12.2
	I0917 00:22:20.105802  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
	I0917 00:22:20.110505  579049 addons.go:435] installing /etc/kubernetes/addons/volcano-deployment.yaml
	I0917 00:22:20.110593  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (498149 bytes)
	I0917 00:22:20.110706  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:20.111106  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.117453  579049 out.go:179]   - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
	I0917 00:22:20.120309  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
	I0917 00:22:20.120335  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
	I0917 00:22:20.120410  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:20.202345  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.216867  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.218283  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.230169  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.231429  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.244728  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.265918  579049 out.go:179]   - Using image docker.io/rancher/local-path-provisioner:v0.0.22
	I0917 00:22:20.269669  579049 out.go:179]   - Using image docker.io/busybox:stable
	I0917 00:22:20.273694  579049 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I0917 00:22:20.273715  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
	I0917 00:22:20.274082  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:20.274501  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.283477  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	W0917 00:22:20.285066  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.285100  579049 retry.go:31] will retry after 252.540607ms: ssh: handshake failed: EOF
	W0917 00:22:20.288539  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.288569  579049 retry.go:31] will retry after 264.62158ms: ssh: handshake failed: EOF
	I0917 00:22:20.310748  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.320974  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:20.325337  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	W0917 00:22:20.330706  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.330738  579049 retry.go:31] will retry after 286.431927ms: ssh: handshake failed: EOF
	W0917 00:22:20.331009  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.331022  579049 retry.go:31] will retry after 225.439884ms: ssh: handshake failed: EOF
	W0917 00:22:20.560617  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.560694  579049 retry.go:31] will retry after 498.859934ms: ssh: handshake failed: EOF
	W0917 00:22:20.618933  579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
	I0917 00:22:20.619001  579049 retry.go:31] will retry after 250.417921ms: ssh: handshake failed: EOF
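The handshake EOF warnings above are not fatal: each ssh dial that fails while the node's sshd is still settling is handed to retry.go, which sleeps a short randomized delay and dials again, which is why the copies and applies that follow still succeed. A rough Go sketch of that retry-with-jitter shape (attempt count and base delay are illustrative, not minikube's actual tuning):

    package main

    import (
    	"errors"
    	"fmt"
    	"math/rand"
    	"time"
    )

    // retry runs fn up to attempts times, sleeping a jittered delay between
    // failures, roughly the pattern behind the "will retry after ..." lines above.
    func retry(attempts int, base time.Duration, fn func() error) error {
    	var err error
    	for i := 0; i < attempts; i++ {
    		if err = fn(); err == nil {
    			return nil
    		}
    		delay := base + time.Duration(rand.Int63n(int64(base)))
    		fmt.Printf("will retry after %v: %v\n", delay, err)
    		time.Sleep(delay)
    	}
    	return err
    }

    func main() {
    	calls := 0
    	err := retry(5, 200*time.Millisecond, func() error {
    		calls++
    		if calls < 3 {
    			return errors.New("ssh: handshake failed: EOF")
    		}
    		return nil
    	})
    	fmt.Println("result:", err)
    }
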
	I0917 00:22:20.999362  579049 addons.go:435] installing /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:20.999427  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-deployment.yaml (15034 bytes)
	I0917 00:22:21.120689  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-creds-rc.yaml
	I0917 00:22:21.131890  579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I0917 00:22:21.131913  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
	I0917 00:22:21.215923  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
	I0917 00:22:21.256240  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
	I0917 00:22:21.278795  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:22:21.309143  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:21.310139  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
	I0917 00:22:21.320908  579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I0917 00:22:21.320973  579049 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I0917 00:22:21.350747  579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
	I0917 00:22:21.350814  579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
	I0917 00:22:21.363215  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0917 00:22:21.366593  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
	I0917 00:22:21.376668  579049 addons.go:435] installing /etc/kubernetes/addons/registry-svc.yaml
	I0917 00:22:21.376740  579049 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
	I0917 00:22:21.379853  579049 addons.go:435] installing /etc/kubernetes/addons/yakd-sa.yaml
	I0917 00:22:21.379924  579049 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
	I0917 00:22:21.454575  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-hostpath.yaml
	I0917 00:22:21.454649  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
	I0917 00:22:21.481397  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
	I0917 00:22:21.508249  579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I0917 00:22:21.508324  579049 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I0917 00:22:21.575611  579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
	I0917 00:22:21.575687  579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
	I0917 00:22:21.588569  579049 addons.go:435] installing /etc/kubernetes/addons/yakd-crb.yaml
	I0917 00:22:21.588590  579049 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
	I0917 00:22:21.601465  579049 addons.go:435] installing /etc/kubernetes/addons/registry-proxy.yaml
	I0917 00:22:21.601485  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
	I0917 00:22:21.639347  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
	I0917 00:22:21.842186  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0917 00:22:21.852122  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
	I0917 00:22:21.855602  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
	I0917 00:22:21.855679  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
	I0917 00:22:21.870985  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
	I0917 00:22:21.955013  579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
	I0917 00:22:21.955038  579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
	I0917 00:22:21.956595  579049 addons.go:435] installing /etc/kubernetes/addons/yakd-svc.yaml
	I0917 00:22:21.956611  579049 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
	I0917 00:22:22.004673  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
	I0917 00:22:22.004702  579049 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
	I0917 00:22:22.015168  579049 addons.go:435] installing /etc/kubernetes/addons/yakd-dp.yaml
	I0917 00:22:22.015243  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
	I0917 00:22:22.054170  579049 addons.go:435] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0917 00:22:22.054234  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
	I0917 00:22:22.150580  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0917 00:22:22.163336  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
	I0917 00:22:22.163415  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
	I0917 00:22:22.247512  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
	I0917 00:22:22.580252  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
	I0917 00:22:22.580326  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
	I0917 00:22:22.867348  579049 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.772593683s)
	I0917 00:22:22.867983  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-creds-rc.yaml: (1.747263334s)
	I0917 00:22:22.868231  579049 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.49.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.880521592s)
	I0917 00:22:22.868267  579049 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
	I0917 00:22:22.868866  579049 node_ready.go:35] waiting up to 6m0s for node "addons-235235" to be "Ready" ...
	I0917 00:22:22.869306  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (1.653355535s)
	I0917 00:22:22.873217  579049 node_ready.go:49] node "addons-235235" is "Ready"
	I0917 00:22:22.873266  579049 node_ready.go:38] duration metric: took 4.346882ms for node "addons-235235" to be "Ready" ...
	I0917 00:22:22.873304  579049 api_server.go:52] waiting for apiserver process to appear ...
	I0917 00:22:22.873388  579049 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:22:23.325708  579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
	I0917 00:22:23.325782  579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
	I0917 00:22:23.372792  579049 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-235235" context rescaled to 1 replicas
	I0917 00:22:23.671947  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
	I0917 00:22:23.672021  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
	I0917 00:22:23.831484  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.575195536s)
	I0917 00:22:24.232998  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
	I0917 00:22:24.233020  579049 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
	I0917 00:22:24.555276  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
	I0917 00:22:24.555301  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
	I0917 00:22:25.321586  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
	I0917 00:22:25.321609  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
	I0917 00:22:25.652309  579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0917 00:22:25.652335  579049 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
	I0917 00:22:25.894792  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
	I0917 00:22:27.067585  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.788759397s)
	I0917 00:22:27.067698  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.758533525s)
	W0917 00:22:27.067719  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget created
	serviceaccount/gadget created
	configmap/gadget created
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role created
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding created
	role.rbac.authorization.k8s.io/gadget-role created
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding created
	daemonset.apps/gadget created
	
	stderr:
	Warning: spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/gadget]: deprecated since v1.30; use the "appArmorProfile" field instead
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:27.067740  579049 retry.go:31] will retry after 187.564418ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget created
	serviceaccount/gadget created
	configmap/gadget created
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role created
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding created
	role.rbac.authorization.k8s.io/gadget-role created
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding created
	daemonset.apps/gadget created
	
	stderr:
	Warning: spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/gadget]: deprecated since v1.30; use the "appArmorProfile" field instead
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
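The validation error above lines up with the earlier transfer: ig-crd.yaml was copied to the node as only 14 bytes (the scp line at 00:22:19.919926), so the file cannot hold a complete manifest, and kubectl rejects it because every Kubernetes object must declare at least apiVersion and kind. The ig-deployment.yaml half of the pair does apply, which is why the gadget namespace, RBAC objects, and daemonset show up as created in stdout while the command still exits with status 1.
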
	I0917 00:22:27.067780  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (5.757625871s)
	I0917 00:22:27.067808  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (5.704571183s)
	I0917 00:22:27.068035  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml: (5.701381369s)
	I0917 00:22:27.255577  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:27.282212  579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
	I0917 00:22:27.282292  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:27.307633  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:28.198316  579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
	I0917 00:22:28.732874  579049 addons.go:238] Setting addon gcp-auth=true in "addons-235235"
	I0917 00:22:28.732978  579049 host.go:66] Checking if "addons-235235" exists ...
	I0917 00:22:28.733534  579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
	I0917 00:22:28.760672  579049 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
	I0917 00:22:28.760726  579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
	I0917 00:22:28.786938  579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
	I0917 00:22:29.463041  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (7.981552717s)
	I0917 00:22:29.463076  579049 addons.go:479] Verifying addon ingress=true in "addons-235235"
	I0917 00:22:29.475984  579049 out.go:179] * Verifying ingress addon...
	I0917 00:22:29.479749  579049 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
	I0917 00:22:29.483072  579049 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
	I0917 00:22:29.483090  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
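The kapi.go lines above and below poll the ingress-nginx namespace until every pod matching the label selector leaves Pending. A compact client-go sketch of that kind of wait (kubeconfig path, namespace, selector, and timeout are placeholders for illustration, not the values minikube hard-codes):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    // waitForPods polls until all pods matching selector in ns report Running.
    func waitForPods(cs *kubernetes.Clientset, ns, selector string, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
    		if err != nil {
    			return err
    		}
    		ready := len(pods.Items) > 0
    		for _, p := range pods.Items {
    			if p.Status.Phase != corev1.PodRunning {
    				fmt.Printf("waiting for pod %q, current state: %s\n", p.Name, p.Status.Phase)
    				ready = false
    			}
    		}
    		if ready {
    			return nil
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    	return fmt.Errorf("timed out waiting for %q in %q", selector, ns)
    }

    func main() {
    	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(config)
    	if err != nil {
    		panic(err)
    	}
    	if err := waitForPods(cs, "ingress-nginx", "app.kubernetes.io/name=ingress-nginx", 5*time.Minute); err != nil {
    		fmt.Println(err)
    	}
    }
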
	I0917 00:22:29.983564  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:30.520900  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:30.986245  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:31.513540  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:31.657033  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (10.017646567s)
	I0917 00:22:31.657111  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.814852375s)
	I0917 00:22:31.657121  579049 addons.go:479] Verifying addon metrics-server=true in "addons-235235"
	I0917 00:22:31.657163  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (9.804981697s)
	I0917 00:22:31.657324  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (9.786267653s)
	I0917 00:22:31.657335  579049 addons.go:479] Verifying addon registry=true in "addons-235235"
	I0917 00:22:31.657603  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.50694528s)
	W0917 00:22:31.657627  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	Warning: unrecognized format "int64"
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
	I0917 00:22:31.657644  579049 retry.go:31] will retry after 208.434362ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
	stdout:
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
	customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
	serviceaccount/snapshot-controller created
	clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
	clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
	role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
	deployment.apps/snapshot-controller created
	
	stderr:
	Warning: unrecognized format "int64"
	error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
	ensure CRDs are installed first
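This retry is an ordering problem rather than a broken manifest: the VolumeSnapshot CRDs and the csi-hostpath-snapclass object are applied in the same kubectl invocation, so the client's REST mapping does not yet know the freshly created snapshot.storage.k8s.io/v1 kinds, hence "ensure CRDs are installed first". Once the CRDs are registered, the same set of files reapplied with --force goes through; that is the apply that completes at 00:22:34.710856 below.
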
	I0917 00:22:31.657687  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.410102979s)
	I0917 00:22:31.657913  579049 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (8.784482182s)
	I0917 00:22:31.657946  579049 api_server.go:72] duration metric: took 12.12899934s to wait for apiserver process to appear ...
	I0917 00:22:31.657961  579049 api_server.go:88] waiting for apiserver healthz status ...
	I0917 00:22:31.657976  579049 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
	I0917 00:22:31.660605  579049 out.go:179] * Verifying registry addon...
	I0917 00:22:31.662797  579049 out.go:179] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
	
		minikube -p addons-235235 service yakd-dashboard -n yakd-dashboard
	
	I0917 00:22:31.665554  579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
	I0917 00:22:31.705531  579049 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
	ok
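The healthz probe above is a plain HTTPS GET against the apiserver, repeated until it answers 200 with the body "ok". A stripped-down Go sketch of the same check; it skips certificate verification purely to stay self-contained, whereas the real probe trusts the cluster's CA:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"io"
    	"net/http"
    	"time"
    )

    // checkHealthz issues a GET against the apiserver healthz endpoint and
    // prints the status code plus body ("ok" when the control plane is healthy).
    func checkHealthz(url string) error {
    	client := &http.Client{
    		Timeout: 5 * time.Second,
    		// Illustration only: the real check verifies against the cluster CA.
    		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    	}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Printf("%s returned %d: %s\n", url, resp.StatusCode, body)
    	return nil
    }

    func main() {
    	if err := checkHealthz("https://192.168.49.2:8443/healthz"); err != nil {
    		fmt.Println("healthz check failed:", err)
    	}
    }
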
	I0917 00:22:31.713969  579049 api_server.go:141] control plane version: v1.34.0
	I0917 00:22:31.714001  579049 api_server.go:131] duration metric: took 56.033586ms to wait for apiserver health ...
	I0917 00:22:31.714010  579049 system_pods.go:43] waiting for kube-system pods to appear ...
	I0917 00:22:31.734358  579049 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
	I0917 00:22:31.734427  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:31.735262  579049 system_pods.go:59] 16 kube-system pods found
	I0917 00:22:31.735335  579049 system_pods.go:61] "coredns-66bc5c9577-6jkl2" [b422966b-a3ef-457a-9695-5acb333105f2] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:22:31.735358  579049 system_pods.go:61] "coredns-66bc5c9577-s7lnm" [c37a81dc-9db1-4641-ad42-980e711a8985] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:22:31.735398  579049 system_pods.go:61] "etcd-addons-235235" [438d620c-e5f7-4568-8e78-168d015441a0] Running
	I0917 00:22:31.735420  579049 system_pods.go:61] "kube-apiserver-addons-235235" [04bff5c3-c307-41ec-84bd-84774ce5ac2b] Running
	I0917 00:22:31.735436  579049 system_pods.go:61] "kube-controller-manager-addons-235235" [11b56f68-ebfa-458b-9ab6-bc0a77081554] Running
	I0917 00:22:31.735456  579049 system_pods.go:61] "kube-ingress-dns-minikube" [3513e960-b391-45fd-bb89-61ffc65dd2d8] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0917 00:22:31.735485  579049 system_pods.go:61] "kube-proxy-7ccvd" [afbd7d53-723a-44f0-a2bc-90e8e2f43355] Running
	I0917 00:22:31.735510  579049 system_pods.go:61] "kube-scheduler-addons-235235" [8fba6417-b80a-410d-ac0d-bdea71e0ad08] Running
	I0917 00:22:31.735528  579049 system_pods.go:61] "metrics-server-85b7d694d7-wxzn6" [07657805-0866-4a88-9dc0-bde04eb55366] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I0917 00:22:31.735567  579049 system_pods.go:61] "nvidia-device-plugin-daemonset-wwzd9" [62b9d30e-619a-4e29-816c-46c303ee603b] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
	I0917 00:22:31.735614  579049 system_pods.go:61] "registry-66898fdd98-wd8hb" [8ab107e4-0115-4366-9bd4-27d43e0f5fde] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I0917 00:22:31.735652  579049 system_pods.go:61] "registry-creds-764b6fb674-bpfj4" [fceed494-b40b-4705-bc7a-4fcd4cae83e7] Pending / Ready:ContainersNotReady (containers with unready status: [registry-creds]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-creds])
	I0917 00:22:31.735677  579049 system_pods.go:61] "registry-proxy-w6q4j" [bd100a37-3e81-4ba5-9d5c-654e6cabeefe] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I0917 00:22:31.735704  579049 system_pods.go:61] "snapshot-controller-7d9fbc56b8-7dk8p" [40f72e03-be95-4f94-96f1-7fb9bfd7782e] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0917 00:22:31.735737  579049 system_pods.go:61] "snapshot-controller-7d9fbc56b8-dr9xq" [5754c01a-b776-41d8-a534-a2efd263a92d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0917 00:22:31.735759  579049 system_pods.go:61] "storage-provisioner" [0da33418-3586-4572-95eb-054917cd0df1] Running
	I0917 00:22:31.735778  579049 system_pods.go:74] duration metric: took 21.762446ms to wait for pod list to return data ...
	I0917 00:22:31.735811  579049 default_sa.go:34] waiting for default service account to be created ...
	I0917 00:22:31.797765  579049 default_sa.go:45] found service account: "default"
	I0917 00:22:31.797840  579049 default_sa.go:55] duration metric: took 62.007252ms for default service account to be created ...
	I0917 00:22:31.797865  579049 system_pods.go:116] waiting for k8s-apps to be running ...
	I0917 00:22:31.866304  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
	I0917 00:22:31.870565  579049 system_pods.go:86] 16 kube-system pods found
	I0917 00:22:31.870658  579049 system_pods.go:89] "coredns-66bc5c9577-6jkl2" [b422966b-a3ef-457a-9695-5acb333105f2] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:22:31.870682  579049 system_pods.go:89] "coredns-66bc5c9577-s7lnm" [c37a81dc-9db1-4641-ad42-980e711a8985] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:22:31.870721  579049 system_pods.go:89] "etcd-addons-235235" [438d620c-e5f7-4568-8e78-168d015441a0] Running
	I0917 00:22:31.870751  579049 system_pods.go:89] "kube-apiserver-addons-235235" [04bff5c3-c307-41ec-84bd-84774ce5ac2b] Running
	I0917 00:22:31.870775  579049 system_pods.go:89] "kube-controller-manager-addons-235235" [11b56f68-ebfa-458b-9ab6-bc0a77081554] Running
	I0917 00:22:31.870809  579049 system_pods.go:89] "kube-ingress-dns-minikube" [3513e960-b391-45fd-bb89-61ffc65dd2d8] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
	I0917 00:22:31.870833  579049 system_pods.go:89] "kube-proxy-7ccvd" [afbd7d53-723a-44f0-a2bc-90e8e2f43355] Running
	I0917 00:22:31.870855  579049 system_pods.go:89] "kube-scheduler-addons-235235" [8fba6417-b80a-410d-ac0d-bdea71e0ad08] Running
	I0917 00:22:31.870892  579049 system_pods.go:89] "metrics-server-85b7d694d7-wxzn6" [07657805-0866-4a88-9dc0-bde04eb55366] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I0917 00:22:31.870923  579049 system_pods.go:89] "nvidia-device-plugin-daemonset-wwzd9" [62b9d30e-619a-4e29-816c-46c303ee603b] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
	I0917 00:22:31.870961  579049 system_pods.go:89] "registry-66898fdd98-wd8hb" [8ab107e4-0115-4366-9bd4-27d43e0f5fde] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
	I0917 00:22:31.870986  579049 system_pods.go:89] "registry-creds-764b6fb674-bpfj4" [fceed494-b40b-4705-bc7a-4fcd4cae83e7] Pending / Ready:ContainersNotReady (containers with unready status: [registry-creds]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-creds])
	I0917 00:22:31.871011  579049 system_pods.go:89] "registry-proxy-w6q4j" [bd100a37-3e81-4ba5-9d5c-654e6cabeefe] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
	I0917 00:22:31.871050  579049 system_pods.go:89] "snapshot-controller-7d9fbc56b8-7dk8p" [40f72e03-be95-4f94-96f1-7fb9bfd7782e] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0917 00:22:31.871079  579049 system_pods.go:89] "snapshot-controller-7d9fbc56b8-dr9xq" [5754c01a-b776-41d8-a534-a2efd263a92d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
	I0917 00:22:31.871098  579049 system_pods.go:89] "storage-provisioner" [0da33418-3586-4572-95eb-054917cd0df1] Running
	I0917 00:22:31.871133  579049 system_pods.go:126] duration metric: took 73.249376ms to wait for k8s-apps to be running ...
	I0917 00:22:31.871157  579049 system_svc.go:44] waiting for kubelet service to be running ....
	I0917 00:22:31.871237  579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:22:32.006531  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:32.182920  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:32.494952  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:32.500319  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (6.605437979s)
	I0917 00:22:32.500359  579049 addons.go:479] Verifying addon csi-hostpath-driver=true in "addons-235235"
	I0917 00:22:32.503793  579049 out.go:179] * Verifying csi-hostpath-driver addon...
	I0917 00:22:32.508235  579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
	I0917 00:22:32.512657  579049 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
	I0917 00:22:32.512730  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:32.669884  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:32.983997  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:33.012337  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:33.044778  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.78915837s)
	W0917 00:22:33.044815  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:33.044834  579049 retry.go:31] will retry after 198.566688ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:33.044883  579049 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (4.284194298s)
	I0917 00:22:33.048034  579049 out.go:179]   - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
	I0917 00:22:33.050893  579049 out.go:179]   - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.3
	I0917 00:22:33.054753  579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
	I0917 00:22:33.054792  579049 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
	I0917 00:22:33.169519  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:33.172724  579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-service.yaml
	I0917 00:22:33.172798  579049 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
	I0917 00:22:33.243834  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:33.301590  579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I0917 00:22:33.301660  579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
	I0917 00:22:33.406768  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
	I0917 00:22:33.485607  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:33.586879  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:33.688148  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:33.984133  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:34.012679  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:34.168943  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:34.482387  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:34.511735  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:34.668984  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:34.710856  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.844466616s)
	I0917 00:22:34.710903  579049 ssh_runner.go:235] Completed: sudo systemctl is-active --quiet service kubelet: (2.839642651s)
	I0917 00:22:34.710915  579049 system_svc.go:56] duration metric: took 2.839756774s WaitForService to wait for kubelet
	I0917 00:22:34.710925  579049 kubeadm.go:578] duration metric: took 15.181976358s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:22:34.710947  579049 node_conditions.go:102] verifying NodePressure condition ...
	I0917 00:22:34.714122  579049 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0917 00:22:34.714155  579049 node_conditions.go:123] node cpu capacity is 2
	I0917 00:22:34.714169  579049 node_conditions.go:105] duration metric: took 3.215734ms to run NodePressure ...
	I0917 00:22:34.714181  579049 start.go:241] waiting for startup goroutines ...
	I0917 00:22:34.983917  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:35.013235  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:35.169149  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:35.419972  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (2.013163249s)
	I0917 00:22:35.420606  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (2.17666923s)
	W0917 00:22:35.420684  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:35.420715  579049 retry.go:31] will retry after 478.039335ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:35.422881  579049 addons.go:479] Verifying addon gcp-auth=true in "addons-235235"
	I0917 00:22:35.426486  579049 out.go:179] * Verifying gcp-auth addon...
	I0917 00:22:35.430334  579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
	I0917 00:22:35.433311  579049 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
	I0917 00:22:35.433335  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:35.483522  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:35.512985  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:35.669283  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:35.899660  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:35.934061  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:35.983075  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:36.014126  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:36.169228  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:36.433391  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:36.484048  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:36.513719  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:36.669883  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:36.934019  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:36.967996  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.068279172s)
	W0917 00:22:36.968081  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:36.968114  579049 retry.go:31] will retry after 429.933718ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:36.983492  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:37.012578  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:37.169608  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:37.398868  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:37.434035  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:37.483695  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:37.513097  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:37.669254  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:37.933828  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:37.983753  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:38.012359  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:38.169280  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:38.433659  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:38.472091  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.073117553s)
	W0917 00:22:38.472179  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:38.472211  579049 retry.go:31] will retry after 1.093261919s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:38.483107  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:38.513748  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:38.669564  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:38.933810  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:39.035792  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:39.036182  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:39.169358  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:39.433484  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:39.483655  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:39.512275  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:39.566570  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:39.671658  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:39.933810  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:39.982626  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:40.012312  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:40.168520  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:40.433728  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:40.482815  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:40.511323  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:40.663119  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.096514525s)
	W0917 00:22:40.663150  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:40.663168  579049 retry.go:31] will retry after 968.834ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:40.669504  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:40.934323  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:40.983478  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:41.012058  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:41.169418  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:41.433562  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:41.483160  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:41.511746  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:41.632349  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:41.668836  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:41.939173  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:41.983603  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:42.013137  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:42.178902  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:42.434743  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:42.483015  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:42.512312  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:42.672319  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:42.698504  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.066119827s)
	W0917 00:22:42.698536  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:42.698554  579049 retry.go:31] will retry after 3.87974874s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:42.935006  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:42.983177  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:43.011513  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:43.168634  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:43.434743  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:43.483672  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:43.512419  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:43.669559  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:43.934133  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:43.985194  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:44.013102  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:44.169064  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:44.434452  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:44.484735  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:44.516687  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:44.669016  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:44.934907  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:45.036222  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:45.036251  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:45.183204  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:45.435398  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:45.483797  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:45.512339  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:45.676517  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:45.933912  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:46.035397  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:46.036327  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:46.169499  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:46.433966  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:46.482966  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:46.511660  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:46.578871  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:46.669739  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:46.934755  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:46.983637  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:47.014965  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:47.170179  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:47.434208  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:47.484303  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:47.512218  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:47.669451  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:47.722398  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.143486123s)
	W0917 00:22:47.722481  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:47.722516  579049 retry.go:31] will retry after 3.865203544s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:47.934104  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:47.983251  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:48.012414  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:48.228151  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:48.441992  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:48.491006  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:48.517229  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:48.670336  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:48.935197  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:49.035902  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:49.036115  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:49.169418  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:49.434110  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:49.483835  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:49.512599  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:49.669628  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:49.934176  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:49.983522  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:50.013156  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:50.169713  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:50.434906  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:50.483748  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:50.512896  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:50.669471  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:50.934381  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:50.983906  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:51.016707  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:51.169495  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:51.433642  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:51.484021  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:51.512789  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:51.588323  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:51.668870  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:51.934839  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:51.983106  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:52.013018  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:52.169566  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:52.445730  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:52.483931  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:52.511423  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:52.669221  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:52.937597  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:53.000759  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:53.016261  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:53.109924  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.521553496s)
	W0917 00:22:53.109964  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:53.110032  579049 retry.go:31] will retry after 5.363493142s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:53.169589  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:53.436641  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:53.484203  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:53.512129  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:53.669404  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:53.933687  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:53.984009  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:54.012523  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:54.168759  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:54.436940  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:54.482957  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:54.512511  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:54.669715  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:54.933652  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:54.983709  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:55.016170  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:55.169198  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:55.434414  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:55.483608  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:55.511338  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:55.669199  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:55.934082  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:55.982975  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:56.012228  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:56.169443  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:56.433537  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:56.483574  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:56.511919  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:56.669526  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:56.934029  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:56.983078  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:57.012894  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:57.168345  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:57.433231  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:57.483322  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:57.511707  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:57.668839  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:57.933467  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:57.983865  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:58.012707  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:58.169780  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:58.433750  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:58.474033  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:22:58.483484  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:58.512004  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:58.668825  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:58.934274  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:58.987068  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:59.035579  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:59.168985  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:59.433843  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:59.483657  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:22:59.511417  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:22:59.658322  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.184206111s)
	W0917 00:22:59.658400  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:59.658437  579049 retry.go:31] will retry after 9.746511661s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:22:59.669263  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:22:59.934238  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:22:59.982864  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:00.016052  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:00.174691  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:00.434984  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:00.483876  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:00.513476  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:00.669584  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:00.933971  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:00.982750  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:01.011860  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:01.169096  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:01.436942  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:01.537211  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:01.538183  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:01.669014  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:01.935900  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:01.983355  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:02.012728  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:02.169264  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:02.438468  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:02.486595  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:02.518387  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:02.669406  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:02.976760  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:02.982537  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:03.012025  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:03.168995  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:03.434586  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:03.483947  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:03.513255  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:03.671520  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:03.934836  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:03.983476  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:04.012791  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:04.169795  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:04.434192  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:04.483011  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:04.512545  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:04.669008  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:04.934442  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:05.035350  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:05.035523  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:05.168304  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:05.433164  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:05.483321  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:05.511434  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:05.668126  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:05.933337  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:05.983583  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:06.012788  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:06.168724  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:06.434027  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:06.483588  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:06.512253  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:06.668834  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:06.934114  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:06.983375  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:07.011687  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:07.170210  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:07.434042  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:07.484735  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:07.512586  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:07.669109  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:07.949118  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:07.991221  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:08.017216  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:08.169471  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:08.433198  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:08.483313  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:08.511644  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:08.668653  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:08.933720  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:08.984291  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:09.012628  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:09.168820  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:09.405193  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:23:09.434292  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:09.483808  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:09.512517  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:09.668467  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:09.933869  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:09.983452  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:10.013181  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:10.169685  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:10.433630  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:10.484107  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:10.512966  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:10.551154  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.145912711s)
	W0917 00:23:10.551189  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:23:10.551207  579049 retry.go:31] will retry after 10.609466096s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
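The validation failure above means kubectl's client-side validator found a document in ig-crd.yaml without the mandatory apiVersion and kind fields, so the whole apply exits with status 1 even though the other gadget objects were accepted unchanged. A minimal diagnostic sketch, assuming one wants to inspect the manifest header on the node (the profile name is taken from this run; the command itself was not executed by the test):

    minikube -p addons-235235 ssh "sudo head -n 10 /etc/kubernetes/addons/ig-crd.yaml"
    # Every manifest document is expected to start with both fields, e.g. a CRD header
    # roughly of the form:
    #   apiVersion: apiextensions.k8s.io/v1
    #   kind: CustomResourceDefinition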
	I0917 00:23:10.669918  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
	I0917 00:23:10.935398  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:11.035890  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:11.035927  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:11.170517  579049 kapi.go:107] duration metric: took 39.504954611s to wait for kubernetes.io/minikube-addons=registry ...
	I0917 00:23:11.435194  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:11.483475  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:11.512376  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:11.934148  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:11.983014  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:12.013691  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:12.433817  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:12.482974  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:12.512574  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:12.934094  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:12.987820  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:13.086240  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:13.434623  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:13.483502  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:13.511875  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:13.934639  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:14.035725  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:14.035890  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:14.441837  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:14.482977  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:14.511985  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:14.933846  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:14.982978  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:15.035820  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:15.434072  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:15.483545  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:15.528537  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:15.936337  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:15.984195  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:16.013827  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:16.433875  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:16.483351  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:16.511950  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:16.933619  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:16.983912  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:17.013130  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:17.433715  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:17.483887  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:17.512579  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:17.936202  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:17.983438  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:18.012291  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:18.438017  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:18.537157  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:18.537329  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:18.939228  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:18.983501  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:19.012876  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:19.434128  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:19.483738  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:19.511993  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:19.940784  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:19.984848  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:20.016983  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:20.443095  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:20.483572  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:20.512361  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:20.933072  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:20.982973  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:21.012269  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:21.161633  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	I0917 00:23:21.434508  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:21.484749  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:21.518514  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:21.933502  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:21.983271  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:22.011521  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:22.438977  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:22.483224  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:22.520929  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:22.527795  579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.36612796s)
	W0917 00:23:22.527836  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:23:22.527856  579049 retry.go:31] will retry after 27.856550582s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	I0917 00:23:22.934334  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:23.013809  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:23.014105  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:23.433945  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:23.483038  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:23.512502  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:23.933589  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:23.983501  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:24.013504  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:24.441542  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:24.484414  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:24.512098  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:24.934001  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:24.983644  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:25.020238  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:25.433777  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:25.483554  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:25.511476  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:25.935514  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:26.036451  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:26.036871  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:26.434657  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:26.484766  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:26.512035  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:26.933479  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:26.983466  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:27.012054  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:27.433378  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:27.483258  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:27.511607  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:27.936267  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:27.983608  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:28.016562  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:28.434426  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:28.483992  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:28.512861  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:28.961694  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:29.055767  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:29.056248  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:29.433985  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:29.483331  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:29.511649  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:29.956412  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:29.983770  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:30.015076  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:30.433022  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:30.483280  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:30.511812  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:30.933487  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:30.984911  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:31.015167  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:31.433730  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:31.517425  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:31.518948  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:31.933295  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:31.983012  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:32.012898  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:32.433836  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:32.483221  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:32.512177  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:32.938217  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:33.014504  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:33.014790  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:33.434075  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:33.483110  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:33.512209  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:33.934251  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:33.984054  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:34.012293  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
	I0917 00:23:34.436238  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:34.483272  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:34.512989  579049 kapi.go:107] duration metric: took 1m2.004754049s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
	I0917 00:23:34.935107  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:35.035360  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:35.440491  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:35.483684  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:35.934278  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:35.983480  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:36.433866  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:36.483489  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:36.934081  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:36.983363  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:37.434402  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:37.483348  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:37.933582  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:37.983191  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:38.433926  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:38.483203  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:38.934315  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:38.984100  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:39.433869  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:39.483591  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:39.938352  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:40.039485  579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
	I0917 00:23:40.434201  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:40.483155  579049 kapi.go:107] duration metric: took 1m11.003407728s to wait for app.kubernetes.io/name=ingress-nginx ...
	I0917 00:23:40.933502  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:41.434827  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:41.933247  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:42.434075  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:42.934274  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:43.435033  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:43.934501  579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
	I0917 00:23:44.436090  579049 kapi.go:107] duration metric: took 1m9.005751766s to wait for kubernetes.io/minikube-addons=gcp-auth ...
	I0917 00:23:44.439490  579049 out.go:179] * Your GCP credentials will now be mounted into every pod created in the addons-235235 cluster.
	I0917 00:23:44.443208  579049 out.go:179] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
	I0917 00:23:44.446137  579049 out.go:179] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
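A minimal sketch of the opt-out described in the message above, using the gcp-auth-skip-secret label it names; the pod name and image are hypothetical, chosen only for illustration:

    kubectl --context addons-235235 apply -f - <<'EOF'
    apiVersion: v1
    kind: Pod
    metadata:
      name: no-gcp-creds                 # hypothetical name
      labels:
        gcp-auth-skip-secret: "true"     # skips the credential mount per the message above
    spec:
      containers:
      - name: app
        image: busybox:stable            # image assumed for illustration
        command: ["sleep", "3600"]
    EOF
    # For pods created before the addon finished, the log suggests re-running the addon
    # with --refresh, e.g. (assumed invocation):
    #   minikube -p addons-235235 addons enable gcp-auth --refresh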
	I0917 00:23:50.384717  579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
	W0917 00:23:51.262614  579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	W0917 00:23:51.262722  579049 out.go:285] ! Enabling 'inspektor-gadget' returned an error: running callbacks: [sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
	stdout:
	namespace/gadget unchanged
	serviceaccount/gadget unchanged
	configmap/gadget unchanged
	clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
	clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
	role.rbac.authorization.k8s.io/gadget-role unchanged
	rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
	daemonset.apps/gadget configured
	
	stderr:
	error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
	]
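The stderr above names its own workaround: re-running the apply with client-side validation disabled. A sketch of that retry, reusing the exact command from the log with only --validate=false added (this run did not perform it):

    sudo KUBECONFIG=/var/lib/minikube/kubeconfig \
      /var/lib/minikube/binaries/v1.34.0/kubectl apply --force --validate=false \
      -f /etc/kubernetes/addons/ig-crd.yaml \
      -f /etc/kubernetes/addons/ig-deployment.yaml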
	I0917 00:23:51.267881  579049 out.go:179] * Enabled addons: registry-creds, nvidia-device-plugin, cloud-spanner, storage-provisioner, ingress-dns, amd-gpu-device-plugin, default-storageclass, volcano, metrics-server, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
	I0917 00:23:51.271569  579049 addons.go:514] duration metric: took 1m31.741443043s for enable addons: enabled=[registry-creds nvidia-device-plugin cloud-spanner storage-provisioner ingress-dns amd-gpu-device-plugin default-storageclass volcano metrics-server yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
	I0917 00:23:51.271626  579049 start.go:246] waiting for cluster config update ...
	I0917 00:23:51.271648  579049 start.go:255] writing updated cluster config ...
	I0917 00:23:51.271955  579049 ssh_runner.go:195] Run: rm -f paused
	I0917 00:23:51.276590  579049 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:23:51.281629  579049 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-s7lnm" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.307466  579049 pod_ready.go:94] pod "coredns-66bc5c9577-s7lnm" is "Ready"
	I0917 00:23:51.307495  579049 pod_ready.go:86] duration metric: took 25.844332ms for pod "coredns-66bc5c9577-s7lnm" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.312687  579049 pod_ready.go:83] waiting for pod "etcd-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.317890  579049 pod_ready.go:94] pod "etcd-addons-235235" is "Ready"
	I0917 00:23:51.317973  579049 pod_ready.go:86] duration metric: took 5.198143ms for pod "etcd-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.320783  579049 pod_ready.go:83] waiting for pod "kube-apiserver-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.325836  579049 pod_ready.go:94] pod "kube-apiserver-addons-235235" is "Ready"
	I0917 00:23:51.325916  579049 pod_ready.go:86] duration metric: took 5.053621ms for pod "kube-apiserver-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.328222  579049 pod_ready.go:83] waiting for pod "kube-controller-manager-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.680573  579049 pod_ready.go:94] pod "kube-controller-manager-addons-235235" is "Ready"
	I0917 00:23:51.680603  579049 pod_ready.go:86] duration metric: took 352.316029ms for pod "kube-controller-manager-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:51.880693  579049 pod_ready.go:83] waiting for pod "kube-proxy-7ccvd" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:52.281104  579049 pod_ready.go:94] pod "kube-proxy-7ccvd" is "Ready"
	I0917 00:23:52.281132  579049 pod_ready.go:86] duration metric: took 400.409037ms for pod "kube-proxy-7ccvd" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:52.480042  579049 pod_ready.go:83] waiting for pod "kube-scheduler-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:52.880499  579049 pod_ready.go:94] pod "kube-scheduler-addons-235235" is "Ready"
	I0917 00:23:52.880528  579049 pod_ready.go:86] duration metric: took 400.460556ms for pod "kube-scheduler-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:23:52.880542  579049 pod_ready.go:40] duration metric: took 1.603920721s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:23:52.938822  579049 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
	I0917 00:23:52.941854  579049 out.go:179] * Done! kubectl is now configured to use "addons-235235" cluster and "default" namespace by default
	
	
	==> Docker <==
	Sep 17 00:26:18 addons-235235 dockerd[1178]: time="2025-09-17T00:26:18.421150810Z" level=info msg="ignoring event" container=8310810da55356feadee71e0fdd7606d87d39af34fa418cf2a110898a0bb1229 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Sep 17 00:26:46 addons-235235 dockerd[1178]: time="2025-09-17T00:26:46.649330164Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:26:46 addons-235235 dockerd[1178]: time="2025-09-17T00:26:46.737763322Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:27:02 addons-235235 dockerd[1178]: time="2025-09-17T00:27:02.834303238Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:27:37 addons-235235 dockerd[1178]: time="2025-09-17T00:27:37.650197669Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:27:37 addons-235235 dockerd[1178]: time="2025-09-17T00:27:37.848639655Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:27:37 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:27:37Z" level=info msg="Stop pulling image docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: Pulling from library/busybox"
	Sep 17 00:28:05 addons-235235 dockerd[1178]: time="2025-09-17T00:28:05.120881541Z" level=info msg="ignoring event" container=d3e79390835e0e0e8cae64267db2629eb221a2249ed2e8df729cb3781e22ad61 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Sep 17 00:28:20 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:28:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b77675601378ff5c97c1767fba1e24aff7a14d87c69900baba5082df85dddfc7/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:28:20 addons-235235 dockerd[1178]: time="2025-09-17T00:28:20.655165490Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:28:20 addons-235235 dockerd[1178]: time="2025-09-17T00:28:20.741495844Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:28:29 addons-235235 dockerd[1178]: time="2025-09-17T00:28:29.820977327Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:28:34 addons-235235 dockerd[1178]: time="2025-09-17T00:28:34.642739380Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:28:34 addons-235235 dockerd[1178]: time="2025-09-17T00:28:34.739117138Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:28:57 addons-235235 dockerd[1178]: time="2025-09-17T00:28:57.652065247Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:28:57 addons-235235 dockerd[1178]: time="2025-09-17T00:28:57.735030726Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:29:46 addons-235235 dockerd[1178]: time="2025-09-17T00:29:46.652301167Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:29:46 addons-235235 dockerd[1178]: time="2025-09-17T00:29:46.835696258Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:29:46 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:29:46Z" level=info msg="Stop pulling image docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: Pulling from library/busybox"
	Sep 17 00:30:20 addons-235235 dockerd[1178]: time="2025-09-17T00:30:20.707809978Z" level=info msg="ignoring event" container=b77675601378ff5c97c1767fba1e24aff7a14d87c69900baba5082df85dddfc7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Sep 17 00:30:51 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:30:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/80fc587f34c85f71988c3be8046cb4f607010d68fb7c68e94832bf4d5d59b4f6/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:30:51 addons-235235 dockerd[1178]: time="2025-09-17T00:30:51.215693201Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:30:51 addons-235235 dockerd[1178]: time="2025-09-17T00:30:51.307349823Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:31:03 addons-235235 dockerd[1178]: time="2025-09-17T00:31:03.649158464Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:31:03 addons-235235 dockerd[1178]: time="2025-09-17T00:31:03.740429266Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
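
The repeated "toomanyrequests" errors in the Docker log above show Docker Hub's anonymous pull rate limit being hit while fetching busybox for the local-path helper pod, which is what keeps the LocalPath test waiting on its PVC. Two hedged workaround sketches (the registry account and the locally available image are assumptions, not part of this run):

    # Authenticate the node's Docker daemon so pulls count against an account quota
    # (interactive password prompt):
    minikube -p addons-235235 ssh "docker login -u <dockerhub-user>"
    # Or side-load a locally available copy of the image so no registry pull is needed:
    minikube -p addons-235235 image load busybox:stable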
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                               CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	fba2a89454255       nginx@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8                                       5 minutes ago       Running             nginx                     0                   63ebf5457695b       nginx
	e2c5ba2f8355f       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e                 6 minutes ago       Running             busybox                   0                   bc4b89ca8105d       busybox
	82404e257ef79       rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246              7 minutes ago       Running             local-path-provisioner    0                   ac4816af246bf       local-path-provisioner-648f6765c9-lpgz2
	cbc586ed79b42       ghcr.io/inspektor-gadget/inspektor-gadget@sha256:66fdf18cc8a577423b2a36b96a5be40fe690fdb986bfe7875f54edfa9c7d19a5   8 minutes ago       Running             gadget                    0                   bd72c74811a18       gadget-4bk2n
	b279765d02101       ba04bb24b9575                                                                                                       8 minutes ago       Running             storage-provisioner       0                   537bddb7fc1f8       storage-provisioner
	994b418390c96       138784d87c9c5                                                                                                       8 minutes ago       Running             coredns                   0                   9d39a312841ad       coredns-66bc5c9577-s7lnm
	e39e9e3a27c04       6fc32d66c1411                                                                                                       8 minutes ago       Running             kube-proxy                0                   20f8154ff37ed       kube-proxy-7ccvd
	c29889afefd1f       996be7e86d9b3                                                                                                       8 minutes ago       Running             kube-controller-manager   0                   005f2c1a7c358       kube-controller-manager-addons-235235
	3ba1c9fd171c9       a25f5ef9c34c3                                                                                                       8 minutes ago       Running             kube-scheduler            0                   d7b768bb065c7       kube-scheduler-addons-235235
	da4746efd3426       d291939e99406                                                                                                       8 minutes ago       Running             kube-apiserver            0                   55660481e3eb0       kube-apiserver-addons-235235
	afa22acfe31f9       a1894772a478e                                                                                                       8 minutes ago       Running             etcd                      0                   813d3a00d28e7       etcd-addons-235235
	
	
	==> coredns [994b418390c9] <==
	[INFO] 10.244.0.24:43663 - 23106 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000099444s
	[INFO] 10.244.0.24:43663 - 53628 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.000927592s
	[INFO] 10.244.0.24:44728 - 6127 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002355348s
	[INFO] 10.244.0.24:43663 - 14310 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001372479s
	[INFO] 10.244.0.24:44728 - 39738 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001929118s
	[INFO] 10.244.0.24:44728 - 1608 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000105401s
	[INFO] 10.244.0.24:43663 - 24802 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00009247s
	[INFO] 10.244.0.24:35588 - 21373 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000222632s
	[INFO] 10.244.0.24:60461 - 64061 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000114254s
	[INFO] 10.244.0.24:60461 - 14808 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000106041s
	[INFO] 10.244.0.24:60461 - 58271 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000099132s
	[INFO] 10.244.0.24:60461 - 25338 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000099887s
	[INFO] 10.244.0.24:60461 - 35311 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000105031s
	[INFO] 10.244.0.24:60461 - 4918 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000100748s
	[INFO] 10.244.0.24:35588 - 63902 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000112564s
	[INFO] 10.244.0.24:35588 - 58247 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000122754s
	[INFO] 10.244.0.24:60461 - 14588 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001645761s
	[INFO] 10.244.0.24:35588 - 64085 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000096211s
	[INFO] 10.244.0.24:35588 - 61625 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000116206s
	[INFO] 10.244.0.24:35588 - 3142 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000081606s
	[INFO] 10.244.0.24:60461 - 861 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001721116s
	[INFO] 10.244.0.24:60461 - 41183 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000101241s
	[INFO] 10.244.0.24:35588 - 29487 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001062259s
	[INFO] 10.244.0.24:35588 - 58951 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001063219s
	[INFO] 10.244.0.24:35588 - 25364 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000116518s
	
	
	==> describe nodes <==
	Name:               addons-235235
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=addons-235235
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
	                    minikube.k8s.io/name=addons-235235
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_09_17T00_22_14_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	                    topology.hostpath.csi/node=addons-235235
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Wed, 17 Sep 2025 00:22:11 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  addons-235235
	  AcquireTime:     <unset>
	  RenewTime:       Wed, 17 Sep 2025 00:31:04 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Wed, 17 Sep 2025 00:25:47 +0000   Wed, 17 Sep 2025 00:22:07 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Wed, 17 Sep 2025 00:25:47 +0000   Wed, 17 Sep 2025 00:22:07 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Wed, 17 Sep 2025 00:25:47 +0000   Wed, 17 Sep 2025 00:22:07 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Wed, 17 Sep 2025 00:25:47 +0000   Wed, 17 Sep 2025 00:22:11 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    addons-235235
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	System Info:
	  Machine ID:                 b08b2949c1734d3d9463c78e2767d05b
	  System UUID:                992d6b4e-453a-4002-aab2-36cce8203392
	  Boot ID:                    54a40c62-e2ca-4fe1-8de3-5249514e3fbf
	  Kernel Version:             5.15.0-1084-aws
	  OS Image:                   Ubuntu 22.04.5 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://28.4.0
	  Kubelet Version:            v1.34.0
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (13 in total)
	  Namespace                   Name                                                          CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                                          ------------  ----------  ---------------  -------------  ---
	  default                     busybox                                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m31s
	  default                     hello-world-app-5d498dc89-8ftcl                               0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m31s
	  default                     nginx                                                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m42s
	  gadget                      gadget-4bk2n                                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m40s
	  kube-system                 coredns-66bc5c9577-s7lnm                                      100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     8m46s
	  kube-system                 etcd-addons-235235                                            100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         8m52s
	  kube-system                 kube-apiserver-addons-235235                                  250m (12%)    0 (0%)      0 (0%)           0 (0%)         8m52s
	  kube-system                 kube-controller-manager-addons-235235                         200m (10%)    0 (0%)      0 (0%)           0 (0%)         8m52s
	  kube-system                 kube-proxy-7ccvd                                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m46s
	  kube-system                 kube-scheduler-addons-235235                                  100m (5%)     0 (0%)      0 (0%)           0 (0%)         8m52s
	  kube-system                 storage-provisioner                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m38s
	  local-path-storage          helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb    0 (0%)        0 (0%)      0 (0%)           0 (0%)         15s
	  local-path-storage          local-path-provisioner-648f6765c9-lpgz2                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m38s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  0 (0%)
	  memory             170Mi (2%)  170Mi (2%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type     Reason                   Age                    From             Message
	  ----     ------                   ----                   ----             -------
	  Normal   Starting                 8m44s                  kube-proxy       
	  Normal   NodeHasSufficientMemory  8m59s (x8 over 8m59s)  kubelet          Node addons-235235 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    8m59s (x8 over 8m59s)  kubelet          Node addons-235235 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     8m59s (x7 over 8m59s)  kubelet          Node addons-235235 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  8m59s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 8m52s                  kubelet          Starting kubelet.
	  Warning  CgroupV1                 8m52s                  kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeAllocatableEnforced  8m52s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasSufficientMemory  8m52s                  kubelet          Node addons-235235 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    8m52s                  kubelet          Node addons-235235 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     8m52s                  kubelet          Node addons-235235 status is now: NodeHasSufficientPID
	  Normal   RegisteredNode           8m47s                  node-controller  Node addons-235235 event: Registered Node addons-235235 in Controller
	
	
	==> dmesg <==
	[Sep16 22:47] kauditd_printk_skb: 8 callbacks suppressed
	[Sep17 00:20] kauditd_printk_skb: 8 callbacks suppressed
	
	
	==> etcd [afa22acfe31f] <==
	{"level":"warn","ts":"2025-09-17T00:22:09.862673Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48544","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:09.883446Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48560","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:09.908885Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48572","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:09.933076Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48604","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:09.947149Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48612","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:09.976939Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48624","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:10.000993Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48644","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:10.028362Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48664","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:10.054661Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48674","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:10.068432Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48682","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:10.162261Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48698","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:33.424386Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:43378","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:33.442190Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:43406","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.188356Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41564","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.275302Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41578","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.304728Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41596","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.372770Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41610","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.418026Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41638","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.451608Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41662","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.489918Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41666","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.523009Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41694","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.550961Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41714","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.606919Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41752","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.648329Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41756","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:22:48.688947Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41778","server-name":"","error":"EOF"}
	
	
	==> kernel <==
	 00:31:05 up  3:13,  0 users,  load average: 0.54, 2.40, 3.17
	Linux addons-235235 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.5 LTS"
	
	
	==> kube-apiserver [da4746efd342] <==
	I0917 00:25:32.453975       1 controller.go:667] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
	I0917 00:25:34.605239       1 alloc.go:328] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.106.235.163"}
	I0917 00:26:02.165021       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:26:02.410726       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
	I0917 00:26:04.058365       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0917 00:26:04.058414       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0917 00:26:04.117836       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0917 00:26:04.117879       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0917 00:26:04.199437       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0917 00:26:04.199501       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0917 00:26:04.315955       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0917 00:26:04.316153       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	I0917 00:26:04.389300       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
	I0917 00:26:04.389340       1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
	W0917 00:26:05.118236       1 cacher.go:182] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
	W0917 00:26:05.389566       1 cacher.go:182] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
	W0917 00:26:05.486218       1 cacher.go:182] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
	I0917 00:26:20.062672       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:27:05.320870       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:27:25.932699       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:28:22.063797       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:28:52.615213       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:29:30.337032       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:30:06.780130       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:30:46.059351       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	
	
	==> kube-controller-manager [c29889afefd1] <==
	E0917 00:30:06.198922       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:08.758062       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:08.759225       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:10.616644       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:10.617699       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:17.890021       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:17.891302       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:26.267407       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:26.268575       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:27.805363       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:27.806501       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:31.243089       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:31.244174       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:33.721970       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:33.723164       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:47.422934       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:47.424112       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:48.953425       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:48.954394       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:30:57.786179       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:30:57.787366       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:31:00.094533       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:31:00.100278       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	E0917 00:31:03.982717       1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
	E0917 00:31:03.983948       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
	
	
	==> kube-proxy [e39e9e3a27c0] <==
	I0917 00:22:20.738742       1 server_linux.go:53] "Using iptables proxy"
	I0917 00:22:20.930177       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I0917 00:22:21.030707       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I0917 00:22:21.030776       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
	E0917 00:22:21.030914       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I0917 00:22:21.052890       1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I0917 00:22:21.052944       1 server_linux.go:132] "Using iptables Proxier"
	I0917 00:22:21.059415       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I0917 00:22:21.060002       1 server.go:527] "Version info" version="v1.34.0"
	I0917 00:22:21.060027       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:22:21.066591       1 config.go:106] "Starting endpoint slice config controller"
	I0917 00:22:21.066610       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I0917 00:22:21.066875       1 config.go:200] "Starting service config controller"
	I0917 00:22:21.066882       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I0917 00:22:21.068286       1 config.go:403] "Starting serviceCIDR config controller"
	I0917 00:22:21.068297       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I0917 00:22:21.069263       1 config.go:309] "Starting node config controller"
	I0917 00:22:21.069274       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I0917 00:22:21.069280       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I0917 00:22:21.166927       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	I0917 00:22:21.166966       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I0917 00:22:21.169420       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	
	
	==> kube-scheduler [3ba1c9fd171c] <==
	I0917 00:22:11.751477       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:22:11.753675       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:22:11.753707       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:22:11.754695       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I0917 00:22:11.754864       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	E0917 00:22:11.756984       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
	E0917 00:22:11.765217       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
	E0917 00:22:11.765491       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
	E0917 00:22:11.765716       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
	E0917 00:22:11.765893       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
	E0917 00:22:11.766045       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
	E0917 00:22:11.766188       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
	E0917 00:22:11.766367       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
	E0917 00:22:11.766507       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
	E0917 00:22:11.766674       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
	E0917 00:22:11.766857       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
	E0917 00:22:11.767028       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
	E0917 00:22:11.767210       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
	E0917 00:22:11.767366       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
	E0917 00:22:11.767519       1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
	E0917 00:22:11.767626       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
	E0917 00:22:11.767736       1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
	E0917 00:22:11.767785       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
	E0917 00:22:11.767944       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
	I0917 00:22:13.354770       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kubelet <==
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.843623    2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1c771fa-70ab-40b4-aa3f-199a0039d79e-script" (OuterVolumeSpecName: "script") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "script". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.843688    2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c771fa-70ab-40b4-aa3f-199a0039d79e-data" (OuterVolumeSpecName: "data") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "data". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.847536    2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c771fa-70ab-40b4-aa3f-199a0039d79e-kube-api-access-fn25h" (OuterVolumeSpecName: "kube-api-access-fn25h") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "kube-api-access-fn25h". PluginName "kubernetes.io/projected", VolumeGIDValue ""
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.943943    2320 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-fn25h\" (UniqueName: \"kubernetes.io/projected/d1c771fa-70ab-40b4-aa3f-199a0039d79e-kube-api-access-fn25h\") on node \"addons-235235\" DevicePath \"\""
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.943991    2320 reconciler_common.go:299] "Volume detached for volume \"script\" (UniqueName: \"kubernetes.io/configmap/d1c771fa-70ab-40b4-aa3f-199a0039d79e-script\") on node \"addons-235235\" DevicePath \"\""
	Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.944001    2320 reconciler_common.go:299] "Volume detached for volume \"data\" (UniqueName: \"kubernetes.io/host-path/d1c771fa-70ab-40b4-aa3f-199a0039d79e-data\") on node \"addons-235235\" DevicePath \"\""
	Sep 17 00:30:23 addons-235235 kubelet[2320]: I0917 00:30:23.605951    2320 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c771fa-70ab-40b4-aa3f-199a0039d79e" path="/var/lib/kubelet/pods/d1c771fa-70ab-40b4-aa3f-199a0039d79e/volumes"
	Sep 17 00:30:24 addons-235235 kubelet[2320]: E0917 00:30:24.599468    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
	Sep 17 00:30:36 addons-235235 kubelet[2320]: E0917 00:30:36.599303    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
	Sep 17 00:30:49 addons-235235 kubelet[2320]: E0917 00:30:49.599574    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
	Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.753986    2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data\" (UniqueName: \"kubernetes.io/host-path/c840396e-7af2-418e-938e-f4879a2f827c-data\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
	Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.754038    2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs76q\" (UniqueName: \"kubernetes.io/projected/c840396e-7af2-418e-938e-f4879a2f827c-kube-api-access-qs76q\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
	Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.754067    2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"script\" (UniqueName: \"kubernetes.io/configmap/c840396e-7af2-418e-938e-f4879a2f827c-script\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
	Sep 17 00:30:51 addons-235235 kubelet[2320]: I0917 00:30:51.103026    2320 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80fc587f34c85f71988c3be8046cb4f607010d68fb7c68e94832bf4d5d59b4f6"
	Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310745    2320 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310800    2320 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310907    2320 kuberuntime_manager.go:1449] "Unhandled Error" err="container helper-pod start failed in pod helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb_local-path-storage(c840396e-7af2-418e-938e-f4879a2f827c): ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310946    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ErrImagePull: \"Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
	Sep 17 00:30:52 addons-235235 kubelet[2320]: E0917 00:30:52.118753    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
	Sep 17 00:30:53 addons-235235 kubelet[2320]: E0917 00:30:53.126667    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
	Sep 17 00:31:00 addons-235235 kubelet[2320]: E0917 00:31:00.599529    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
	Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744700    2320 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744766    2320 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
	Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744853    2320 kuberuntime_manager.go:1449] "Unhandled Error" err="container helper-pod start failed in pod helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb_local-path-storage(c840396e-7af2-418e-938e-f4879a2f827c): ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744890    2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ErrImagePull: \"Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
	
	
	==> storage-provisioner [b279765d0210] <==
	W0917 00:30:39.684502       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:41.687992       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:41.692845       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:43.695566       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:43.700392       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:45.703777       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:45.708694       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:47.712540       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:47.717255       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:49.720314       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:49.724602       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:51.728415       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:51.732730       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:53.735553       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:53.750218       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:55.819023       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:55.825986       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:57.830431       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:57.835086       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:59.837939       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:30:59.842249       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:31:01.846579       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:31:01.852861       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:31:03.858323       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:31:03.865096       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	

                                                
                                                
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-235235 -n addons-235235
helpers_test.go:269: (dbg) Run:  kubectl --context addons-235235 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb
helpers_test.go:282: ======> post-mortem[TestAddons/parallel/LocalPath]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb: exit status 1 (116.827786ms)

                                                
                                                
-- stdout --
	Name:             hello-world-app-5d498dc89-8ftcl
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             addons-235235/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:25:34 +0000
	Labels:           app=hello-world-app
	                  pod-template-hash=5d498dc89
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.33
	IPs:
	  IP:           10.244.0.33
	Controlled By:  ReplicaSet/hello-world-app-5d498dc89
	Containers:
	  hello-world-app:
	    Container ID:   
	    Image:          docker.io/kicbase/echo-server:1.0
	    Image ID:       
	    Port:           8080/TCP
	    Host Port:      0/TCP
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-v5vm8 (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-v5vm8:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                    From               Message
	  ----     ------     ----                   ----               -------
	  Normal   Scheduled  5m32s                  default-scheduler  Successfully assigned default/hello-world-app-5d498dc89-8ftcl to addons-235235
	  Warning  Failed     5m19s                  kubelet            Failed to pull image "docker.io/kicbase/echo-server:1.0": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    2m37s (x5 over 5m31s)  kubelet            Pulling image "docker.io/kicbase/echo-server:1.0"
	  Warning  Failed     2m37s (x4 over 5m31s)  kubelet            Failed to pull image "docker.io/kicbase/echo-server:1.0": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     2m37s (x5 over 5m31s)  kubelet            Error: ErrImagePull
	  Normal   BackOff    30s (x21 over 5m31s)   kubelet            Back-off pulling image "docker.io/kicbase/echo-server:1.0"
	  Warning  Failed     30s (x21 over 5m31s)   kubelet            Error: ImagePullBackOff
	
	
	Name:             test-local-path
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             <none>
	Labels:           run=test-local-path
	Annotations:      <none>
	Status:           Pending
	IP:               
	IPs:              <none>
	Containers:
	  busybox:
	    Image:      busybox:stable
	    Port:       <none>
	    Host Port:  <none>
	    Command:
	      sh
	      -c
	      echo 'local-path-provisioner' > /test/file1
	    Environment:  <none>
	    Mounts:
	      /test from data (rw)
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-f7bg7 (ro)
	Volumes:
	  data:
	    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
	    ClaimName:  test-pvc
	    ReadOnly:   false
	  kube-api-access-f7bg7:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:                      <none>

                                                
                                                
-- /stdout --
** stderr ** 
	Error from server (NotFound): pods "helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" not found

                                                
                                                
** /stderr **
helpers_test.go:287: kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb: exit status 1
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (42.809308777s)
--- FAIL: TestAddons/parallel/LocalPath (345.79s)

                                                
                                    
x
+
TestFunctional/parallel/DashboardCmd (302.48s)

                                                
                                                
=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:920: (dbg) daemon: [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-918451 --alsologtostderr -v=1]
E0917 00:48:52.991080  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:50:16.065664  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:933: output didn't produce a URL
functional_test.go:925: (dbg) stopping [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-918451 --alsologtostderr -v=1] ...
functional_test.go:925: (dbg) [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-918451 --alsologtostderr -v=1] stdout:
functional_test.go:925: (dbg) [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-918451 --alsologtostderr -v=1] stderr:
I0917 00:46:43.452124  628823 out.go:360] Setting OutFile to fd 1 ...
I0917 00:46:43.453367  628823 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:46:43.453396  628823 out.go:374] Setting ErrFile to fd 2...
I0917 00:46:43.453413  628823 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:46:43.453725  628823 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:46:43.454031  628823 mustload.go:65] Loading cluster: functional-918451
I0917 00:46:43.454481  628823 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:46:43.455005  628823 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:46:43.472405  628823 host.go:66] Checking if "functional-918451" exists ...
I0917 00:46:43.472786  628823 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0917 00:46:43.539186  628823 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-09-17 00:46:43.529925586 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0917 00:46:43.539292  628823 api_server.go:166] Checking apiserver status ...
I0917 00:46:43.539372  628823 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0917 00:46:43.539413  628823 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:46:43.555892  628823 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:46:43.665983  628823 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/9131/cgroup
I0917 00:46:43.675623  628823 api_server.go:182] apiserver freezer: "3:freezer:/docker/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/kubepods/burstable/pod45de3c8e16fbece0126e9017f8e2581d/810186cb500fd9496bda125dd887eab8f01715070b39a19fdf737f1b18f2017c"
I0917 00:46:43.675757  628823 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/kubepods/burstable/pod45de3c8e16fbece0126e9017f8e2581d/810186cb500fd9496bda125dd887eab8f01715070b39a19fdf737f1b18f2017c/freezer.state
I0917 00:46:43.684503  628823 api_server.go:204] freezer state: "THAWED"
I0917 00:46:43.684541  628823 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
I0917 00:46:43.692936  628823 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
ok
W0917 00:46:43.692974  628823 out.go:285] * Enabling dashboard ...
* Enabling dashboard ...
I0917 00:46:43.693165  628823 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:46:43.693185  628823 addons.go:69] Setting dashboard=true in profile "functional-918451"
I0917 00:46:43.693204  628823 addons.go:238] Setting addon dashboard=true in "functional-918451"
I0917 00:46:43.693236  628823 host.go:66] Checking if "functional-918451" exists ...
I0917 00:46:43.693667  628823 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:46:43.714035  628823 out.go:179]   - Using image docker.io/kubernetesui/dashboard:v2.7.0
I0917 00:46:43.716920  628823 out.go:179]   - Using image docker.io/kubernetesui/metrics-scraper:v1.0.8
I0917 00:46:43.719710  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-ns.yaml
I0917 00:46:43.719732  628823 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I0917 00:46:43.719820  628823 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:46:43.744241  628823 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:46:43.864512  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I0917 00:46:43.864538  628823 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I0917 00:46:43.885654  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I0917 00:46:43.885675  628823 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I0917 00:46:43.904862  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I0917 00:46:43.904885  628823 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I0917 00:46:43.922520  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-dp.yaml
I0917 00:46:43.922540  628823 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4288 bytes)
I0917 00:46:43.941070  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-role.yaml
I0917 00:46:43.941125  628823 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I0917 00:46:43.959580  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I0917 00:46:43.959607  628823 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I0917 00:46:43.978237  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-sa.yaml
I0917 00:46:43.978273  628823 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I0917 00:46:43.996555  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-secret.yaml
I0917 00:46:43.996581  628823 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I0917 00:46:44.016822  628823 addons.go:435] installing /etc/kubernetes/addons/dashboard-svc.yaml
I0917 00:46:44.016846  628823 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I0917 00:46:44.035886  628823 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0917 00:46:44.841685  628823 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:

	minikube -p functional-918451 addons enable metrics-server

I0917 00:46:44.844619  628823 addons.go:201] Writing out "functional-918451" config to set dashboard=true...
W0917 00:46:44.844922  628823 out.go:285] * Verifying dashboard health ...
* Verifying dashboard health ...
I0917 00:46:44.845609  628823 kapi.go:59] client config for functional-918451: &rest.Config{Host:"https://192.168.49.2:8441", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.key", CAFile:"/home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil),
NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x20f8410), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0917 00:46:44.846170  628823 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0917 00:46:44.846285  628823 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0917 00:46:44.846304  628823 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0917 00:46:44.846319  628823 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0917 00:46:44.846338  628823 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0917 00:46:44.862036  628823 service.go:215] Found service: &Service{ObjectMeta:{kubernetes-dashboard  kubernetes-dashboard  9973da68-9dcf-4d61-986f-a1f40fd4fee5 1593 0 2025-09-17 00:46:44 +0000 UTC <nil> <nil> map[addonmanager.kubernetes.io/mode:Reconcile k8s-app:kubernetes-dashboard kubernetes.io/minikube-addons:dashboard] map[kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile","k8s-app":"kubernetes-dashboard","kubernetes.io/minikube-addons":"dashboard"},"name":"kubernetes-dashboard","namespace":"kubernetes-dashboard"},"spec":{"ports":[{"port":80,"targetPort":9090}],"selector":{"k8s-app":"kubernetes-dashboard"}}}
] [] [] [{kubectl-client-side-apply Update v1 2025-09-17 00:46:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{}},"f:labels":{".":{},"f:addonmanager.kubernetes.io/mode":{},"f:k8s-app":{},"f:kubernetes.io/minikube-addons":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}} }]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:,Protocol:TCP,Port:80,TargetPort:{0 9090 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{k8s-app: kubernetes-dashboard,},ClusterIP:10.99.59.195,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.99.59.195],IPFamilies:[IPv4],AllocateLoadBalancerN
odePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}
W0917 00:46:44.862195  628823 out.go:285] * Launching proxy ...
* Launching proxy ...
I0917 00:46:44.862277  628823 dashboard.go:152] Executing: /usr/local/bin/kubectl [/usr/local/bin/kubectl --context functional-918451 proxy --port 36195]
I0917 00:46:44.862540  628823 dashboard.go:157] Waiting for kubectl to output host:port ...
I0917 00:46:44.932892  628823 dashboard.go:175] proxy stdout: Starting to serve on 127.0.0.1:36195
W0917 00:46:44.932950  628823 out.go:285] * Verifying proxy health ...
* Verifying proxy health ...
I0917 00:46:44.950537  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[15f36a29-3803-49ab-b11b-a8276dc388e6] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010b7c0 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40004723c0 TLS:<nil>}
I0917 00:46:44.950620  628823 retry.go:31] will retry after 116.894µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.954319  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[7bb8ddc9-f349-42ca-97f4-7958781c9c0c] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010b900 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472500 TLS:<nil>}
I0917 00:46:44.954375  628823 retry.go:31] will retry after 112.823µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.958006  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[ad85e39d-e3d5-433a-88b2-7fd841d21931] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010ba40 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472640 TLS:<nil>}
I0917 00:46:44.958060  628823 retry.go:31] will retry after 326.065µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.961623  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[5dc21d9e-d30a-4fdc-9b77-feb5be4210aa] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010bb40 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472780 TLS:<nil>}
I0917 00:46:44.961674  628823 retry.go:31] will retry after 251.311µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.965374  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[6921c2bd-6907-45b7-a987-532ef8159870] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010bc00 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40004728c0 TLS:<nil>}
I0917 00:46:44.965422  628823 retry.go:31] will retry after 408.925µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.969142  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[357c1152-6ab9-44f0-80ba-9df4c1c14e39] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010bc80 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472a00 TLS:<nil>}
I0917 00:46:44.969215  628823 retry.go:31] will retry after 695.477µs: Temporary Error: unexpected response code: 503
I0917 00:46:44.972866  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[aafb5942-4ee1-4cfc-926c-54358d6a1680] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400057c1c0 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000390b40 TLS:<nil>}
I0917 00:46:44.972934  628823 retry.go:31] will retry after 1.360282ms: Temporary Error: unexpected response code: 503
I0917 00:46:44.979608  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[0c81b892-f38a-454c-af7f-a7807768d5a2] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400057c240 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391040 TLS:<nil>}
I0917 00:46:44.979683  628823 retry.go:31] will retry after 1.323514ms: Temporary Error: unexpected response code: 503
I0917 00:46:44.985682  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[01fc71ed-f6e4-49cf-b41b-691c4c4c80c8] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400010be00 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472b40 TLS:<nil>}
I0917 00:46:44.985737  628823 retry.go:31] will retry after 1.672657ms: Temporary Error: unexpected response code: 503
I0917 00:46:44.993080  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[a99eed3d-67c3-49e5-95da-c3750c56f7c6] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:44 GMT]] Body:0x400057c340 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391180 TLS:<nil>}
I0917 00:46:44.993137  628823 retry.go:31] will retry after 5.716241ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.002016  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[148c1bea-0785-4ce5-a9d2-748e1eb6a19c] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c3c0 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40003912c0 TLS:<nil>}
I0917 00:46:45.002086  628823 retry.go:31] will retry after 5.894435ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.026832  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[787c26c9-2ea4-431f-aa03-c61fab0af8cf] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c440 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391400 TLS:<nil>}
I0917 00:46:45.026900  628823 retry.go:31] will retry after 9.211444ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.045956  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[c32e1d25-d18c-45cc-9da0-f31faeb64fcb] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400062b080 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391540 TLS:<nil>}
I0917 00:46:45.046066  628823 retry.go:31] will retry after 9.280362ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.068343  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[46122236-fca0-4b18-9da2-51a5c64de246] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400062b3c0 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472c80 TLS:<nil>}
I0917 00:46:45.068497  628823 retry.go:31] will retry after 16.371718ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.090117  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[4e6e1ba2-c83f-4d16-82ef-1fb27cbe9ac7] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c580 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40003917c0 TLS:<nil>}
I0917 00:46:45.090190  628823 retry.go:31] will retry after 28.859351ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.123795  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[0ed72753-aa66-465b-99be-d3753f806e65] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400062b4c0 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472dc0 TLS:<nil>}
I0917 00:46:45.123898  628823 retry.go:31] will retry after 53.233993ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.182854  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[7abbedf9-3731-4ccd-a67e-d6d31f4cc9c2] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c680 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391900 TLS:<nil>}
I0917 00:46:45.182947  628823 retry.go:31] will retry after 67.485909ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.255060  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[5adc6fc2-16ec-4f63-a4d3-7e2ed04df323] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c700 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391a40 TLS:<nil>}
I0917 00:46:45.255163  628823 retry.go:31] will retry after 74.107673ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.333870  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[acbdf9a0-9ab7-4d95-8f19-b1ed1010dd94] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400057c780 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391b80 TLS:<nil>}
I0917 00:46:45.333997  628823 retry.go:31] will retry after 218.598125ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.556159  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[7837811d-f2b5-454c-8ac9-80d2cbc31657] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400062b780 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472f00 TLS:<nil>}
I0917 00:46:45.556235  628823 retry.go:31] will retry after 312.33582ms: Temporary Error: unexpected response code: 503
I0917 00:46:45.872206  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[c174b1a7-2d91-42cc-b992-24fc3baf3d2b] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:45 GMT]] Body:0x400062b840 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000391e00 TLS:<nil>}
I0917 00:46:45.872270  628823 retry.go:31] will retry after 167.201018ms: Temporary Error: unexpected response code: 503
I0917 00:46:46.042497  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[2ba1c2af-e38a-451e-9fb4-5a946b412666] Cache-Control:[no-cache, private] Content-Length:[182] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:46 GMT]] Body:0x400062b900 ContentLength:182 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40004732c0 TLS:<nil>}
I0917 00:46:46.042561  628823 retry.go:31] will retry after 463.190361ms: Temporary Error: unexpected response code: 503
I0917 00:46:46.508865  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[dfe01bf4-cd19-4007-be51-effdeeeb865f] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:46 GMT]] Body:0x400057c940 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2280 TLS:<nil>}
I0917 00:46:46.508960  628823 retry.go:31] will retry after 1.121891762s: Temporary Error: unexpected response code: 503
I0917 00:46:47.633545  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[8b6e095c-48e1-42d8-8cfb-b45bd4926eb0] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:47 GMT]] Body:0x400062ba40 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000473400 TLS:<nil>}
I0917 00:46:47.633607  628823 retry.go:31] will retry after 796.716026ms: Temporary Error: unexpected response code: 503
I0917 00:46:48.433463  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[c1e6bd2a-ffc6-4148-9ba5-407bc0672357] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:48 GMT]] Body:0x400057ca40 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b23c0 TLS:<nil>}
I0917 00:46:48.433526  628823 retry.go:31] will retry after 1.518592933s: Temporary Error: unexpected response code: 503
I0917 00:46:49.955982  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[0cb96466-7216-416b-9f1b-cbcdcbc77a55] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:49 GMT]] Body:0x400057cb00 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000473540 TLS:<nil>}
I0917 00:46:49.956046  628823 retry.go:31] will retry after 1.536107615s: Temporary Error: unexpected response code: 503
I0917 00:46:51.495435  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[6495e2c6-e4d2-4e84-a44e-d1b9fc877158] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:51 GMT]] Body:0x400062bbc0 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000473680 TLS:<nil>}
I0917 00:46:51.495502  628823 retry.go:31] will retry after 4.737532339s: Temporary Error: unexpected response code: 503
I0917 00:46:56.236685  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[2ce74fee-47c1-4fbf-a4c0-4e29790b0943] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:46:56 GMT]] Body:0x400057cc00 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2500 TLS:<nil>}
I0917 00:46:56.236752  628823 retry.go:31] will retry after 4.607041674s: Temporary Error: unexpected response code: 503
I0917 00:47:00.847771  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[a0a87f3f-3686-4ad8-b008-9be33544a9e4] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:47:00 GMT]] Body:0x400062bd40 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2780 TLS:<nil>}
I0917 00:47:00.847836  628823 retry.go:31] will retry after 10.513402693s: Temporary Error: unexpected response code: 503
I0917 00:47:11.366531  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[213800fe-01b0-4b85-891f-1c893ede61fe] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:47:11 GMT]] Body:0x400062be80 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000473a40 TLS:<nil>}
I0917 00:47:11.366596  628823 retry.go:31] will retry after 10.090763077s: Temporary Error: unexpected response code: 503
I0917 00:47:21.463403  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[d7ade865-e086-40ee-b6cf-71bb6aba7ef5] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:47:21 GMT]] Body:0x400057cd00 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b28c0 TLS:<nil>}
I0917 00:47:21.463469  628823 retry.go:31] will retry after 24.643631558s: Temporary Error: unexpected response code: 503
I0917 00:47:46.110926  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[2579bd51-fd44-440a-a11e-26a3e40d9cf5] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:47:46 GMT]] Body:0x400057cdc0 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000473b80 TLS:<nil>}
I0917 00:47:46.110987  628823 retry.go:31] will retry after 16.268750785s: Temporary Error: unexpected response code: 503
I0917 00:48:02.384370  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[37a4763c-0532-4fb4-96d2-79189e5b98de] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:48:02 GMT]] Body:0x40007da180 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2c80 TLS:<nil>}
I0917 00:48:02.384472  628823 retry.go:31] will retry after 29.02333531s: Temporary Error: unexpected response code: 503
I0917 00:48:31.411274  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[95e3322e-be1d-47c4-b360-b5903c748f2c] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:48:31 GMT]] Body:0x40007da2c0 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2dc0 TLS:<nil>}
I0917 00:48:31.411334  628823 retry.go:31] will retry after 47.644636274s: Temporary Error: unexpected response code: 503
I0917 00:49:19.058860  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[60006383-99f2-4468-99d5-04fb2c581e72] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:49:19 GMT]] Body:0x40007da180 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x40001b2f00 TLS:<nil>}
I0917 00:49:19.058924  628823 retry.go:31] will retry after 36.67160889s: Temporary Error: unexpected response code: 503
I0917 00:49:55.733757  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[be01f9ea-bc31-40cc-a70e-2e6d35d037b8] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:49:55 GMT]] Body:0x400057c180 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472000 TLS:<nil>}
I0917 00:49:55.733842  628823 retry.go:31] will retry after 1m28.843735903s: Temporary Error: unexpected response code: 503
I0917 00:51:24.580577  628823 dashboard.go:214] http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ response: <nil> &{Status:503 Service Unavailable StatusCode:503 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Audit-Id:[6bc2acab-5aa0-45b8-ac75-edfecc4ec4f2] Cache-Control:[no-cache, private] Content-Length:[188] Content-Type:[application/json] Date:[Wed, 17 Sep 2025 00:51:24 GMT]] Body:0x40007da180 ContentLength:188 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0x4000472140 TLS:<nil>}
I0917 00:51:24.580642  628823 retry.go:31] will retry after 55.552699443s: Temporary Error: unexpected response code: 503
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestFunctional/parallel/DashboardCmd]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======>  post-mortem[TestFunctional/parallel/DashboardCmd]: docker inspect <======
helpers_test.go:239: (dbg) Run:  docker inspect functional-918451
helpers_test.go:243: (dbg) docker inspect functional-918451:

-- stdout --
	[
	    {
	        "Id": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	        "Created": "2025-09-17T00:32:59.129348997Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 609481,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2025-09-17T00:32:59.19036605Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:3d6f74760dfc17060da5abc5d463d3d45b4ceea05955c9cc42b3ec56cb38cc48",
	        "ResolvConfPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hostname",
	        "HostsPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hosts",
	        "LogPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07-json.log",
	        "Name": "/functional-918451",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "functional-918451:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "functional-918451",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "ID": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	                "LowerDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207-init/diff:/var/lib/docker/overlay2/6bf7b6c5df3b8adf86744064027446440589049694f02d12745ec1de281bdb92/diff",
	                "MergedDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/merged",
	                "UpperDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/diff",
	                "WorkDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "functional-918451",
	                "Source": "/var/lib/docker/volumes/functional-918451/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "functional-918451",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8441/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "functional-918451",
	                "name.minikube.sigs.k8s.io": "functional-918451",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "b8276d0e7a4a68853a13a364899f312a03083d4747586d37196fe37821cc60ca",
	            "SandboxKey": "/var/run/docker/netns/b8276d0e7a4a",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33515"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33516"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33519"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33517"
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33518"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "functional-918451": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "0e:c4:e0:02:03:54",
	                    "DriverOpts": null,
	                    "GwPriority": 0,
	                    "NetworkID": "6a04d22b3edf0df0fed6fcef6fdf3ac9b7a09ca25aa9a4da277d50b627d3354f",
	                    "EndpointID": "29d031089f2a9002698ecc91a4339af43c25eef05a79345c2e7da185073d2f3e",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DNSNames": [
	                        "functional-918451",
	                        "6201077c0331"
	                    ]
	                }
	            }
	        }
	    }
	]

-- /stdout --
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p functional-918451 -n functional-918451
helpers_test.go:252: <<< TestFunctional/parallel/DashboardCmd FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestFunctional/parallel/DashboardCmd]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 logs -n 25: (1.333769441s)
helpers_test.go:260: TestFunctional/parallel/DashboardCmd logs: 
-- stdout --
	
	==> Audit <==
	┌────────────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│    COMMAND     │                                                        ARGS                                                        │      PROFILE      │  USER   │ VERSION │     START TIME      │      END TIME       │
	├────────────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ ssh            │ functional-918451 ssh findmnt -T /mount1                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ mount          │ -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount1 --alsologtostderr -v=1 │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ mount          │ -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount2 --alsologtostderr -v=1 │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ mount          │ -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount3 --alsologtostderr -v=1 │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ ssh            │ functional-918451 ssh findmnt -T /mount1                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │ 17 Sep 25 00:46 UTC │
	│ ssh            │ functional-918451 ssh findmnt -T /mount2                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │ 17 Sep 25 00:46 UTC │
	│ ssh            │ functional-918451 ssh findmnt -T /mount3                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │ 17 Sep 25 00:46 UTC │
	│ mount          │ -p functional-918451 --kill=true                                                                                   │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ start          │ -p functional-918451 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ dashboard      │ --url --port 36195 -p functional-918451 --alsologtostderr -v=1                                                     │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:46 UTC │                     │
	│ service        │ functional-918451 service list                                                                                     │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ service        │ functional-918451 service list -o json                                                                             │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ service        │ functional-918451 service --namespace=default --https --url hello-node                                             │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │                     │
	│ service        │ functional-918451 service hello-node --url --format={{.IP}}                                                        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │                     │
	│ service        │ functional-918451 service hello-node --url                                                                         │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │                     │
	│ update-context │ functional-918451 update-context --alsologtostderr -v=2                                                            │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ update-context │ functional-918451 update-context --alsologtostderr -v=2                                                            │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ update-context │ functional-918451 update-context --alsologtostderr -v=2                                                            │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ image          │ functional-918451 image ls --format short --alsologtostderr                                                        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ image          │ functional-918451 image ls --format json --alsologtostderr                                                         │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ ssh            │ functional-918451 ssh pgrep buildkitd                                                                              │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │                     │
	│ image          │ functional-918451 image build -t localhost/my-image:functional-918451 testdata/build --alsologtostderr             │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ image          │ functional-918451 image ls                                                                                         │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ image          │ functional-918451 image ls --format yaml --alsologtostderr                                                         │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	│ image          │ functional-918451 image ls --format table --alsologtostderr                                                        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:50 UTC │ 17 Sep 25 00:50 UTC │
	└────────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:46:43
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:46:43.249961  628774 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:46:43.250084  628774 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:43.250103  628774 out.go:374] Setting ErrFile to fd 2...
	I0917 00:46:43.250108  628774 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:43.251102  628774 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:46:43.251493  628774 out.go:368] Setting JSON to false
	I0917 00:46:43.252476  628774 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":12549,"bootTime":1758057455,"procs":198,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:46:43.252543  628774 start.go:140] virtualization:  
	I0917 00:46:43.256110  628774 out.go:179] * [functional-918451] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:46:43.259038  628774 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:46:43.259156  628774 notify.go:220] Checking for updates...
	I0917 00:46:43.264582  628774 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:46:43.267389  628774 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:46:43.270251  628774 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:46:43.273089  628774 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:46:43.275977  628774 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:46:43.279222  628774 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:46:43.279769  628774 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:46:43.309959  628774 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:46:43.310112  628774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:46:43.378351  628774 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-09-17 00:46:43.368740075 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:46:43.378547  628774 docker.go:318] overlay module found
	I0917 00:46:43.382058  628774 out.go:179] * Using the docker driver based on the existing profile
	I0917 00:46:43.384892  628774 start.go:304] selected driver: docker
	I0917 00:46:43.384911  628774 start.go:918] validating driver "docker" against &{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:46:43.385020  628774 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:46:43.388416  628774 out.go:203] 
	W0917 00:46:43.391369  628774 out.go:285] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I0917 00:46:43.394193  628774 out.go:203] 
	
	
	==> Docker <==
	Sep 17 00:46:45 functional-918451 dockerd[6835]: time="2025-09-17T00:46:45.620075627Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:46:45 functional-918451 dockerd[6835]: time="2025-09-17T00:46:45.672313776Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
	Sep 17 00:46:45 functional-918451 dockerd[6835]: time="2025-09-17T00:46:45.760943425Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:46:58 functional-918451 dockerd[6835]: time="2025-09-17T00:46:58.848982723Z" level=warning msg="reference for unknown type: " digest="sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c" remote="docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"
	Sep 17 00:46:58 functional-918451 dockerd[6835]: time="2025-09-17T00:46:58.937485031Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:47:00 functional-918451 dockerd[6835]: time="2025-09-17T00:47:00.849694317Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
	Sep 17 00:47:00 functional-918451 dockerd[6835]: time="2025-09-17T00:47:00.932889733Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:47:20 functional-918451 dockerd[6835]: time="2025-09-17T00:47:20.015526177Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:47:24 functional-918451 dockerd[6835]: time="2025-09-17T00:47:24.109252501Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:47:24 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:47:24Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
	Sep 17 00:47:24 functional-918451 dockerd[6835]: time="2025-09-17T00:47:24.852375060Z" level=warning msg="reference for unknown type: " digest="sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c" remote="docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"
	Sep 17 00:47:24 functional-918451 dockerd[6835]: time="2025-09-17T00:47:24.932151515Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:47:27 functional-918451 dockerd[6835]: time="2025-09-17T00:47:27.850301546Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
	Sep 17 00:47:27 functional-918451 dockerd[6835]: time="2025-09-17T00:47:27.934607352Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:48:18 functional-918451 dockerd[6835]: time="2025-09-17T00:48:18.849676063Z" level=warning msg="reference for unknown type: " digest="sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c" remote="docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"
	Sep 17 00:48:18 functional-918451 dockerd[6835]: time="2025-09-17T00:48:18.937136957Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:48:21 functional-918451 dockerd[6835]: time="2025-09-17T00:48:21.856839786Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
	Sep 17 00:48:21 functional-918451 dockerd[6835]: time="2025-09-17T00:48:21.936011812Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:49:46 functional-918451 dockerd[6835]: time="2025-09-17T00:49:46.848325559Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
	Sep 17 00:49:47 functional-918451 dockerd[6835]: time="2025-09-17T00:49:47.021302426Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:49:47 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:49:47Z" level=info msg="Stop pulling image docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93: docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93: Pulling from kubernetesui/dashboard"
	Sep 17 00:49:48 functional-918451 dockerd[6835]: time="2025-09-17T00:49:48.855088139Z" level=warning msg="reference for unknown type: " digest="sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c" remote="docker.io/kubernetesui/metrics-scraper@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c"
	Sep 17 00:49:48 functional-918451 dockerd[6835]: time="2025-09-17T00:49:48.945370378Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:51:24 functional-918451 dockerd[6835]: time="2025-09-17T00:51:24.140342729Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:51:24 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:51:24Z" level=info msg="Stop pulling image kicbase/echo-server:latest: latest: Pulling from kicbase/echo-server"
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                 CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	762ce3d5d73f9       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e   5 minutes ago       Exited              mount-munger              0                   dafcb620980b8       busybox-mount
	7f05a20c84773       nginx@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8                         15 minutes ago      Running             nginx                     0                   226891f84e087       nginx-svc
	bf9dde3577a17       138784d87c9c5                                                                                         15 minutes ago      Running             coredns                   2                   5a0f0f9b9bec0       coredns-66bc5c9577-q6x4w
	2249e1b1919d7       6fc32d66c1411                                                                                         15 minutes ago      Running             kube-proxy                3                   c70143b4207a5       kube-proxy-q4hcq
	cf11e0afbf55d       ba04bb24b9575                                                                                         15 minutes ago      Running             storage-provisioner       3                   9bd802eae08c9       storage-provisioner
	a800de00866e1       996be7e86d9b3                                                                                         16 minutes ago      Running             kube-controller-manager   3                   85599d5f0bdd8       kube-controller-manager-functional-918451
	8f45cd3852343       a25f5ef9c34c3                                                                                         16 minutes ago      Running             kube-scheduler            3                   adb05901f064d       kube-scheduler-functional-918451
	810186cb500fd       d291939e99406                                                                                         16 minutes ago      Running             kube-apiserver            0                   edfa6965041a5       kube-apiserver-functional-918451
	53d0462ffd2c6       a1894772a478e                                                                                         16 minutes ago      Running             etcd                      2                   299a608c07fcf       etcd-functional-918451
	bc1e5b3192079       6fc32d66c1411                                                                                         16 minutes ago      Created             kube-proxy                2                   3c0a229533b1f       kube-proxy-q4hcq
	7cda0e892061a       996be7e86d9b3                                                                                         16 minutes ago      Created             kube-controller-manager   2                   7a2c5910368f6       kube-controller-manager-functional-918451
	e3a6d89ad5b64       a25f5ef9c34c3                                                                                         16 minutes ago      Created             kube-scheduler            2                   ae4004a0e0f99       kube-scheduler-functional-918451
	e707505e3be85       ba04bb24b9575                                                                                         16 minutes ago      Exited              storage-provisioner       2                   56ab6c32da135       storage-provisioner
	d3d4055f8ecd2       138784d87c9c5                                                                                         17 minutes ago      Exited              coredns                   1                   ecc60cd02b8df       coredns-66bc5c9577-q6x4w
	bd4e50a8edbd5       a1894772a478e                                                                                         17 minutes ago      Exited              etcd                      1                   e00e50d78d699       etcd-functional-918451
	
	
	==> coredns [bf9dde3577a1] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:40702 - 26535 "HINFO IN 1869121631954386126.875895371521758492. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.03198374s
	
	
	==> coredns [d3d4055f8ecd] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:57340 - 21792 "HINFO IN 1884376929635518991.5843453519963972009. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.021793349s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	
	==> describe nodes <==
	Name:               functional-918451
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=functional-918451
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
	                    minikube.k8s.io/name=functional-918451
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_09_17T00_33_25_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Wed, 17 Sep 2025 00:33:22 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  functional-918451
	  AcquireTime:     <unset>
	  RenewTime:       Wed, 17 Sep 2025 00:51:39 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Wed, 17 Sep 2025 00:50:47 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Wed, 17 Sep 2025 00:50:47 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Wed, 17 Sep 2025 00:50:47 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Wed, 17 Sep 2025 00:50:47 +0000   Wed, 17 Sep 2025 00:33:22 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    functional-918451
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	System Info:
	  Machine ID:                 787c0d47a41949608af3603ec5366447
	  System UUID:                f83af58d-c48d-4abe-ba83-7d4398f15ffc
	  Boot ID:                    54a40c62-e2ca-4fe1-8de3-5249514e3fbf
	  Kernel Version:             5.15.0-1084-aws
	  OS Image:                   Ubuntu 22.04.5 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://28.4.0
	  Kubelet Version:            v1.34.0
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (13 in total)
	  Namespace                   Name                                          CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                          ------------  ----------  ---------------  -------------  ---
	  default                     hello-node-75c85bcc94-nttx8                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         11m
	  default                     hello-node-connect-7d85dfc575-t4gsf           0 (0%)        0 (0%)      0 (0%)           0 (0%)         15m
	  default                     nginx-svc                                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         15m
	  default                     sp-pod                                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         15m
	  kube-system                 coredns-66bc5c9577-q6x4w                      100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     18m
	  kube-system                 etcd-functional-918451                        100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         18m
	  kube-system                 kube-apiserver-functional-918451              250m (12%)    0 (0%)      0 (0%)           0 (0%)         15m
	  kube-system                 kube-controller-manager-functional-918451     200m (10%)    0 (0%)      0 (0%)           0 (0%)         18m
	  kube-system                 kube-proxy-q4hcq                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         18m
	  kube-system                 kube-scheduler-functional-918451              100m (5%)     0 (0%)      0 (0%)           0 (0%)         18m
	  kube-system                 storage-provisioner                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         18m
	  kubernetes-dashboard        dashboard-metrics-scraper-77bf4d6c4c-trldk    0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m
	  kubernetes-dashboard        kubernetes-dashboard-855c9754f9-b8grq         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  0 (0%)
	  memory             170Mi (2%)  170Mi (2%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type     Reason                   Age                From             Message
	  ----     ------                   ----               ----             -------
	  Normal   Starting                 18m                kube-proxy       
	  Normal   Starting                 15m                kube-proxy       
	  Normal   Starting                 16m                kube-proxy       
	  Normal   Starting                 18m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 18m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  18m (x8 over 18m)  kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    18m (x8 over 18m)  kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     18m (x7 over 18m)  kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  18m                kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 18m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 18m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  18m                kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    18m                kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     18m                kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  18m                kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           18m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   NodeNotReady             17m                kubelet          Node functional-918451 status is now: NodeNotReady
	  Normal   RegisteredNode           16m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   Starting                 16m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 16m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  16m (x8 over 16m)  kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    16m (x8 over 16m)  kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     16m (x7 over 16m)  kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  16m                kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           15m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	
	
	==> dmesg <==
	[Sep16 22:47] kauditd_printk_skb: 8 callbacks suppressed
	[Sep17 00:20] kauditd_printk_skb: 8 callbacks suppressed
	
	
	==> etcd [53d0462ffd2c] <==
	{"level":"warn","ts":"2025-09-17T00:35:48.193870Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41336","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.212630Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41368","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.231571Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41384","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.251324Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41398","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.266264Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41428","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.283844Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41438","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.305246Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41470","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.324766Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41492","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.338961Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41522","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.366501Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41540","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.407986Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41556","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.435057Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41568","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.452382Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41588","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.477192Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41616","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.510691Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41636","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.541591Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41670","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.545272Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41650","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.604666Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41692","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:39:31.191908Z","caller":"traceutil/trace.go:172","msg":"trace[793044705] transaction","detail":"{read_only:false; response_revision:1056; number_of_response:1; }","duration":"143.194218ms","start":"2025-09-17T00:39:31.048697Z","end":"2025-09-17T00:39:31.191891Z","steps":["trace[793044705] 'process raft request'  (duration: 143.096128ms)"],"step_count":1}
	{"level":"info","ts":"2025-09-17T00:45:47.323743Z","caller":"mvcc/index.go:194","msg":"compact tree index","revision":1172}
	{"level":"info","ts":"2025-09-17T00:45:47.351263Z","caller":"mvcc/kvstore_compaction.go:70","msg":"finished scheduled compaction","compact-revision":1172,"took":"27.026718ms","hash":3159752356,"current-db-size-bytes":3325952,"current-db-size":"3.3 MB","current-db-size-in-use-bytes":1540096,"current-db-size-in-use":"1.5 MB"}
	{"level":"info","ts":"2025-09-17T00:45:47.351322Z","caller":"mvcc/hash.go:157","msg":"storing new hash","hash":3159752356,"revision":1172,"compact-revision":-1}
	{"level":"info","ts":"2025-09-17T00:50:47.330293Z","caller":"mvcc/index.go:194","msg":"compact tree index","revision":1478}
	{"level":"info","ts":"2025-09-17T00:50:47.334438Z","caller":"mvcc/kvstore_compaction.go:70","msg":"finished scheduled compaction","compact-revision":1478,"took":"3.400753ms","hash":2640925780,"current-db-size-bytes":3325952,"current-db-size":"3.3 MB","current-db-size-in-use-bytes":2301952,"current-db-size-in-use":"2.3 MB"}
	{"level":"info","ts":"2025-09-17T00:50:47.334487Z","caller":"mvcc/hash.go:157","msg":"storing new hash","hash":2640925780,"revision":1478,"compact-revision":1172}
	
	
	==> etcd [bd4e50a8edbd] <==
	{"level":"warn","ts":"2025-09-17T00:34:46.313001Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49074","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.328657Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49100","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.352785Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49118","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.380490Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49136","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.400768Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49158","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.419290Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49186","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.492683Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49234","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:35:27.923747Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
	{"level":"info","ts":"2025-09-17T00:35:27.923816Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	{"level":"error","ts":"2025-09-17T00:35:27.923929Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926453Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926573Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.926596Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"aec36adc501070cc","current-leader-member-id":"aec36adc501070cc"}
	{"level":"info","ts":"2025-09-17T00:35:34.926693Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
	{"level":"info","ts":"2025-09-17T00:35:34.926711Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927930Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927988Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.927997Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928037Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928052Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.928059Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932196Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"error","ts":"2025-09-17T00:35:34.932268Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932295Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2025-09-17T00:35:34.932302Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	
	
	==> kernel <==
	 00:51:44 up  3:34,  0 users,  load average: 0.34, 0.43, 1.21
	Linux functional-918451 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.5 LTS"
	
	
	==> kube-apiserver [810186cb500f] <==
	I0917 00:39:05.611504       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:39:11.198033       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:40:26.622737       1 alloc.go:328] "allocated clusterIPs" service="default/hello-node" clusterIPs={"IPv4":"10.98.127.179"}
	I0917 00:40:29.268023       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:40:34.849312       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:41:44.951494       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:41:46.296432       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:42:52.426599       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:43:07.339142       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:44:21.162028       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:44:26.453827       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:46.909437       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:48.754483       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:49.285292       1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
	I0917 00:46:44.490490       1 controller.go:667] quota admission added evaluator for: namespaces
	I0917 00:46:44.807593       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.99.59.195"}
	I0917 00:46:44.829352       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.96.25.233"}
	I0917 00:46:49.506494       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:46:54.095683       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:48:08.759828       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:48:22.878755       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:49:36.132678       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:49:37.705383       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:50:42.558555       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:50:53.620407       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	
	
	==> kube-controller-manager [7cda0e892061] <==
	
	
	==> kube-controller-manager [a800de00866e] <==
	I0917 00:35:52.664386       1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
	I0917 00:35:52.665624       1 shared_informer.go:356] "Caches are synced" controller="expand"
	I0917 00:35:52.665868       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
	I0917 00:35:52.668115       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I0917 00:35:52.668389       1 shared_informer.go:356] "Caches are synced" controller="GC"
	I0917 00:35:52.670522       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I0917 00:35:52.672883       1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
	I0917 00:35:52.691346       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I0917 00:35:52.694479       1 shared_informer.go:356] "Caches are synced" controller="endpoint"
	I0917 00:35:52.694480       1 shared_informer.go:356] "Caches are synced" controller="namespace"
	I0917 00:35:52.696661       1 shared_informer.go:356] "Caches are synced" controller="deployment"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
	I0917 00:35:52.700739       1 shared_informer.go:356] "Caches are synced" controller="disruption"
	I0917 00:35:52.701699       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
	I0917 00:35:52.701707       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrapproving"
	I0917 00:35:52.701747       1 shared_informer.go:356] "Caches are synced" controller="stateful set"
	I0917 00:35:52.702840       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
	I0917 00:35:52.735080       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	E0917 00:46:44.649150       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c\" failed with pods \"dashboard-metrics-scraper-77bf4d6c4c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E0917 00:46:44.670953       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E0917 00:46:44.671320       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c\" failed with pods \"dashboard-metrics-scraper-77bf4d6c4c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E0917 00:46:44.686112       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E0917 00:46:44.692124       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E0917 00:46:44.695054       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c\" failed with pods \"dashboard-metrics-scraper-77bf4d6c4c-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	
	
	==> kube-proxy [2249e1b1919d] <==
	I0917 00:35:50.820804       1 server_linux.go:53] "Using iptables proxy"
	I0917 00:35:50.918756       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I0917 00:35:51.019889       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I0917 00:35:51.019925       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
	E0917 00:35:51.020010       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I0917 00:35:51.049955       1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I0917 00:35:51.050178       1 server_linux.go:132] "Using iptables Proxier"
	I0917 00:35:51.058525       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I0917 00:35:51.058826       1 server.go:527] "Version info" version="v1.34.0"
	I0917 00:35:51.058853       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:51.060083       1 config.go:200] "Starting service config controller"
	I0917 00:35:51.060215       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I0917 00:35:51.065750       1 config.go:106] "Starting endpoint slice config controller"
	I0917 00:35:51.065775       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I0917 00:35:51.065792       1 config.go:403] "Starting serviceCIDR config controller"
	I0917 00:35:51.065797       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I0917 00:35:51.066494       1 config.go:309] "Starting node config controller"
	I0917 00:35:51.066513       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I0917 00:35:51.066520       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I0917 00:35:51.161226       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I0917 00:35:51.166495       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I0917 00:35:51.166526       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	
	
	==> kube-proxy [bc1e5b319207] <==
	
	
	==> kube-scheduler [8f45cd385234] <==
	I0917 00:35:46.968566       1 serving.go:386] Generated self-signed cert in-memory
	W0917 00:35:49.218994       1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0917 00:35:49.219031       1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0917 00:35:49.219043       1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0917 00:35:49.219050       1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0917 00:35:49.292960       1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.0"
	I0917 00:35:49.292997       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:49.297061       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297101       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297962       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I0917 00:35:49.299271       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	I0917 00:35:49.398254       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kube-scheduler [e3a6d89ad5b6] <==
	
	
	==> kubelet <==
	Sep 17 00:50:53 functional-918451 kubelet[8557]: E0917 00:50:53.815176    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:50:53 functional-918451 kubelet[8557]: E0917 00:50:53.815641    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:50:54 functional-918451 kubelet[8557]: E0917 00:50:54.809930    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/metrics-scraper:v1.0.8@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c-trldk" podUID="75b224f3-e6ad-4930-be78-f3e01df10621"
	Sep 17 00:50:56 functional-918451 kubelet[8557]: E0917 00:50:56.807764    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:50:59 functional-918451 kubelet[8557]: E0917 00:50:59.812235    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubernetes-dashboard\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/kubernetes-dashboard-855c9754f9-b8grq" podUID="4a584e7d-54d4-4c7d-b0b6-f2d90b45b7ce"
	Sep 17 00:51:05 functional-918451 kubelet[8557]: E0917 00:51:05.807625    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:51:05 functional-918451 kubelet[8557]: E0917 00:51:05.812182    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/metrics-scraper:v1.0.8@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c-trldk" podUID="75b224f3-e6ad-4930-be78-f3e01df10621"
	Sep 17 00:51:08 functional-918451 kubelet[8557]: E0917 00:51:08.807731    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:51:10 functional-918451 kubelet[8557]: E0917 00:51:10.808412    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:51:12 functional-918451 kubelet[8557]: E0917 00:51:12.809607    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubernetes-dashboard\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/kubernetes-dashboard-855c9754f9-b8grq" podUID="4a584e7d-54d4-4c7d-b0b6-f2d90b45b7ce"
	Sep 17 00:51:16 functional-918451 kubelet[8557]: E0917 00:51:16.807688    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:51:19 functional-918451 kubelet[8557]: E0917 00:51:19.819213    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/metrics-scraper:v1.0.8@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c-trldk" podUID="75b224f3-e6ad-4930-be78-f3e01df10621"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.145340    8557 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.145396    8557 kuberuntime_image.go:43] "Failed to pull image" err="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.145536    8557 kuberuntime_manager.go:1449] "Unhandled Error" err="container echo-server start failed in pod hello-node-75c85bcc94-nttx8_default(085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd): ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.145567    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ErrImagePull: \"toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.808160    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:51:24 functional-918451 kubelet[8557]: E0917 00:51:24.810138    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubernetes-dashboard\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/kubernetes-dashboard-855c9754f9-b8grq" podUID="4a584e7d-54d4-4c7d-b0b6-f2d90b45b7ce"
	Sep 17 00:51:30 functional-918451 kubelet[8557]: E0917 00:51:30.808161    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:51:33 functional-918451 kubelet[8557]: E0917 00:51:33.813639    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/metrics-scraper:v1.0.8@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c-trldk" podUID="75b224f3-e6ad-4930-be78-f3e01df10621"
	Sep 17 00:51:37 functional-918451 kubelet[8557]: E0917 00:51:37.807508    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:51:39 functional-918451 kubelet[8557]: E0917 00:51:39.808384    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:51:39 functional-918451 kubelet[8557]: E0917 00:51:39.813951    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubernetes-dashboard\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/kubernetes-dashboard-855c9754f9-b8grq" podUID="4a584e7d-54d4-4c7d-b0b6-f2d90b45b7ce"
	Sep 17 00:51:42 functional-918451 kubelet[8557]: E0917 00:51:42.807902    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:51:44 functional-918451 kubelet[8557]: E0917 00:51:44.809571    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kubernetesui/metrics-scraper:v1.0.8@sha256:76049887f07a0476dc93efc2d3569b9529bf982b22d29f356092ce206e98765c\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-77bf4d6c4c-trldk" podUID="75b224f3-e6ad-4930-be78-f3e01df10621"
	
	
	==> storage-provisioner [cf11e0afbf55] <==
	W0917 00:51:20.389177       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:22.392215       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:22.398914       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:24.401486       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:24.406202       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:26.409592       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:26.417211       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:28.420008       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:28.425739       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:30.428971       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:30.433527       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:32.436860       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:32.443547       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:34.446777       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:34.451016       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:36.454217       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:36.460500       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:38.463726       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:38.468232       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:40.476461       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:40.483442       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:42.486260       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:42.493049       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:44.496413       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:51:44.501234       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	
	
	==> storage-provisioner [e707505e3be8] <==
	I0917 00:35:40.392782       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F0917 00:35:40.397573       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
	

                                                
                                                
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
helpers_test.go:269: (dbg) Run:  kubectl --context functional-918451 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: busybox-mount hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod dashboard-metrics-scraper-77bf4d6c4c-trldk kubernetes-dashboard-855c9754f9-b8grq
helpers_test.go:282: ======> post-mortem[TestFunctional/parallel/DashboardCmd]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context functional-918451 describe pod busybox-mount hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod dashboard-metrics-scraper-77bf4d6c4c-trldk kubernetes-dashboard-855c9754f9-b8grq
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context functional-918451 describe pod busybox-mount hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod dashboard-metrics-scraper-77bf4d6c4c-trldk kubernetes-dashboard-855c9754f9-b8grq: exit status 1 (116.469007ms)

                                                
                                                
-- stdout --
	Name:             busybox-mount
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:46:32 +0000
	Labels:           integration-test=busybox-mount
	Annotations:      <none>
	Status:           Succeeded
	IP:               10.244.0.11
	IPs:
	  IP:  10.244.0.11
	Containers:
	  mount-munger:
	    Container ID:  docker://762ce3d5d73f96de400a2694286398ec031b53cc90f9fc8f1bad010f4f87cf0f
	    Image:         gcr.io/k8s-minikube/busybox:1.28.4-glibc
	    Image ID:      docker-pullable://gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e
	    Port:          <none>
	    Host Port:     <none>
	    Command:
	      /bin/sh
	      -c
	      --
	    Args:
	      cat /mount-9p/created-by-test; echo test > /mount-9p/created-by-pod; rm /mount-9p/created-by-test-removed-by-pod; echo test > /mount-9p/created-by-pod-removed-by-test date >> /mount-9p/pod-dates
	    State:          Terminated
	      Reason:       Completed
	      Exit Code:    0
	      Started:      Wed, 17 Sep 2025 00:46:35 +0000
	      Finished:     Wed, 17 Sep 2025 00:46:35 +0000
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /mount-9p from test-volume (rw)
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-d69cs (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   False 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  test-volume:
	    Type:          HostPath (bare host directory volume)
	    Path:          /mount-9p
	    HostPathType:  
	  kube-api-access-d69cs:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type    Reason     Age    From               Message
	  ----    ------     ----   ----               -------
	  Normal  Scheduled  5m13s  default-scheduler  Successfully assigned default/busybox-mount to functional-918451
	  Normal  Pulling    5m13s  kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
	  Normal  Pulled     5m11s  kubelet            Successfully pulled image "gcr.io/k8s-minikube/busybox:1.28.4-glibc" in 2.273s (2.273s including waiting). Image size: 3547125 bytes.
	  Normal  Created    5m10s  kubelet            Created container: mount-munger
	  Normal  Started    5m10s  kubelet            Started container mount-munger
	
	
	Name:             hello-node-75c85bcc94-nttx8
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:40:26 +0000
	Labels:           app=hello-node
	                  pod-template-hash=75c85bcc94
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.10
	IPs:
	  IP:           10.244.0.10
	Controlled By:  ReplicaSet/hello-node-75c85bcc94
	Containers:
	  echo-server:
	    Container ID:   
	    Image:          kicbase/echo-server
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ErrImagePull
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-949tz (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-949tz:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                    From               Message
	  ----     ------     ----                   ----               -------
	  Normal   Scheduled  11m                    default-scheduler  Successfully assigned default/hello-node-75c85bcc94-nttx8 to functional-918451
	  Warning  Failed     10m (x3 over 11m)      kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    8m13s (x5 over 11m)    kubelet            Pulling image "kicbase/echo-server"
	  Warning  Failed     8m12s (x5 over 11m)    kubelet            Error: ErrImagePull
	  Warning  Failed     8m12s (x2 over 9m46s)  kubelet            Failed to pull image "kicbase/echo-server": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   BackOff    76s (x41 over 11m)     kubelet            Back-off pulling image "kicbase/echo-server"
	  Warning  Failed     76s (x41 over 11m)     kubelet            Error: ImagePullBackOff
	
	
	Name:             hello-node-connect-7d85dfc575-t4gsf
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:27 +0000
	Labels:           app=hello-node-connect
	                  pod-template-hash=7d85dfc575
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.9
	IPs:
	  IP:           10.244.0.9
	Controlled By:  ReplicaSet/hello-node-connect-7d85dfc575
	Containers:
	  echo-server:
	    Container ID:   
	    Image:          kicbase/echo-server
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kpg27 (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-kpg27:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                From               Message
	  ----     ------     ----               ----               -------
	  Normal   Scheduled  15m                default-scheduler  Successfully assigned default/hello-node-connect-7d85dfc575-t4gsf to functional-918451
	  Normal   Pulling    12m (x5 over 15m)  kubelet            Pulling image "kicbase/echo-server"
	  Warning  Failed     12m (x5 over 15m)  kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     12m (x5 over 15m)  kubelet            Error: ErrImagePull
	  Normal   BackOff    6s (x64 over 15m)  kubelet            Back-off pulling image "kicbase/echo-server"
	  Warning  Failed     6s (x64 over 15m)  kubelet            Error: ImagePullBackOff
	
	
	Name:             sp-pod
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:23 +0000
	Labels:           test=storage-provisioner
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.8
	IPs:
	  IP:  10.244.0.8
	Containers:
	  myfrontend:
	    Container ID:   
	    Image:          docker.io/nginx
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /tmp/mount from mypd (rw)
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rrpzl (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  mypd:
	    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
	    ClaimName:  myclaim
	    ReadOnly:   false
	  kube-api-access-rrpzl:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                 From               Message
	  ----     ------     ----                ----               -------
	  Normal   Scheduled  15m                 default-scheduler  Successfully assigned default/sp-pod to functional-918451
	  Warning  Failed     14m (x2 over 15m)   kubelet            Failed to pull image "docker.io/nginx": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    12m (x5 over 15m)   kubelet            Pulling image "docker.io/nginx"
	  Warning  Failed     12m (x3 over 15m)   kubelet            Failed to pull image "docker.io/nginx": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     12m (x5 over 15m)   kubelet            Error: ErrImagePull
	  Normal   BackOff    15s (x64 over 15m)  kubelet            Back-off pulling image "docker.io/nginx"
	  Warning  Failed     15s (x64 over 15m)  kubelet            Error: ImagePullBackOff

                                                
                                                
-- /stdout --
** stderr ** 
	Error from server (NotFound): pods "dashboard-metrics-scraper-77bf4d6c4c-trldk" not found
	Error from server (NotFound): pods "kubernetes-dashboard-855c9754f9-b8grq" not found

                                                
                                                
** /stderr **
helpers_test.go:287: kubectl --context functional-918451 describe pod busybox-mount hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod dashboard-metrics-scraper-77bf4d6c4c-trldk kubernetes-dashboard-855c9754f9-b8grq: exit status 1
--- FAIL: TestFunctional/parallel/DashboardCmd (302.48s)
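
Note: every failure in this block traces back to the same root cause visible in the kubelet events above: Docker Hub's unauthenticated pull rate limit ("toomanyrequests") while pulling kicbase/echo-server, docker.io/nginx and the dashboard images. A minimal sketch for checking how much anonymous pull quota the CI host has left, using Docker Hub's documented ratelimitpreview/test probe (assumes curl and jq are available on the Jenkins host; this is illustrative and not part of the test suite):

# request an anonymous pull token scoped to the rate-limit preview repository
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token)
# a HEAD request against the manifest returns the remaining quota in the ratelimit-* response headers
curl -sI -H "Authorization: Bearer $TOKEN" https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest | grep -i '^ratelimit'

If ratelimit-remaining reports 0, pre-loading the images into the cluster (as the Audit log later in this report shows with "image load") or authenticating the pulls would sidestep the limit.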

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmdConnect (603.18s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1636: (dbg) Run:  kubectl --context functional-918451 create deployment hello-node-connect --image kicbase/echo-server
functional_test.go:1640: (dbg) Run:  kubectl --context functional-918451 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1645: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:352: "hello-node-connect-7d85dfc575-t4gsf" [f049199c-f82f-43ba-b926-425bd104b855] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server])
E0917 00:36:36.862314  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:38:52.991673  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:39:20.703812  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:1645: ***** TestFunctional/parallel/ServiceCmdConnect: pod "app=hello-node-connect" failed to start within 10m0s: context deadline exceeded ****
functional_test.go:1645: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
functional_test.go:1645: TestFunctional/parallel/ServiceCmdConnect: showing logs for failed pods as of 2025-09-17 00:46:27.481055859 +0000 UTC m=+1521.891681849
functional_test.go:1645: (dbg) Run:  kubectl --context functional-918451 describe po hello-node-connect-7d85dfc575-t4gsf -n default
functional_test.go:1645: (dbg) kubectl --context functional-918451 describe po hello-node-connect-7d85dfc575-t4gsf -n default:
Name:             hello-node-connect-7d85dfc575-t4gsf
Namespace:        default
Priority:         0
Service Account:  default
Node:             functional-918451/192.168.49.2
Start Time:       Wed, 17 Sep 2025 00:36:27 +0000
Labels:           app=hello-node-connect
pod-template-hash=7d85dfc575
Annotations:      <none>
Status:           Pending
IP:               10.244.0.9
IPs:
IP:           10.244.0.9
Controlled By:  ReplicaSet/hello-node-connect-7d85dfc575
Containers:
echo-server:
Container ID:   
Image:          kicbase/echo-server
Image ID:       
Port:           <none>
Host Port:      <none>
State:          Waiting
Reason:       ImagePullBackOff
Ready:          False
Restart Count:  0
Environment:    <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kpg27 (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   True 
Initialized                 True 
Ready                       False 
ContainersReady             False 
PodScheduled                True 
Volumes:
kube-api-access-kpg27:
Type:                    Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds:  3607
ConfigMapName:           kube-root-ca.crt
Optional:                false
DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                     From               Message
----     ------     ----                    ----               -------
Normal   Scheduled  10m                     default-scheduler  Successfully assigned default/hello-node-connect-7d85dfc575-t4gsf to functional-918451
Normal   Pulling    7m3s (x5 over 10m)      kubelet            Pulling image "kicbase/echo-server"
Warning  Failed     7m2s (x5 over 10m)      kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Warning  Failed     7m2s (x5 over 10m)      kubelet            Error: ErrImagePull
Normal   BackOff    4m58s (x21 over 9m59s)  kubelet            Back-off pulling image "kicbase/echo-server"
Warning  Failed     4m58s (x21 over 9m59s)  kubelet            Error: ImagePullBackOff
functional_test.go:1645: (dbg) Run:  kubectl --context functional-918451 logs hello-node-connect-7d85dfc575-t4gsf -n default
functional_test.go:1645: (dbg) Non-zero exit: kubectl --context functional-918451 logs hello-node-connect-7d85dfc575-t4gsf -n default: exit status 1 (93.98828ms)

                                                
                                                
** stderr ** 
	Error from server (BadRequest): container "echo-server" in pod "hello-node-connect-7d85dfc575-t4gsf" is waiting to start: trying and failing to pull image

                                                
                                                
** /stderr **
functional_test.go:1645: kubectl --context functional-918451 logs hello-node-connect-7d85dfc575-t4gsf -n default: exit status 1
functional_test.go:1646: failed waiting for hello-node pod: app=hello-node-connect within 10m0s: context deadline exceeded
functional_test.go:1608: service test failed - dumping debug information
functional_test.go:1609: -----------------------service failure post-mortem--------------------------------
functional_test.go:1612: (dbg) Run:  kubectl --context functional-918451 describe po hello-node-connect
functional_test.go:1616: hello-node pod describe:
Name:             hello-node-connect-7d85dfc575-t4gsf
Namespace:        default
Priority:         0
Service Account:  default
Node:             functional-918451/192.168.49.2
Start Time:       Wed, 17 Sep 2025 00:36:27 +0000
Labels:           app=hello-node-connect
pod-template-hash=7d85dfc575
Annotations:      <none>
Status:           Pending
IP:               10.244.0.9
IPs:
IP:           10.244.0.9
Controlled By:  ReplicaSet/hello-node-connect-7d85dfc575
Containers:
echo-server:
Container ID:   
Image:          kicbase/echo-server
Image ID:       
Port:           <none>
Host Port:      <none>
State:          Waiting
Reason:       ImagePullBackOff
Ready:          False
Restart Count:  0
Environment:    <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kpg27 (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   True 
Initialized                 True 
Ready                       False 
ContainersReady             False 
PodScheduled                True 
Volumes:
kube-api-access-kpg27:
Type:                    Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds:  3607
ConfigMapName:           kube-root-ca.crt
Optional:                false
DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                     From               Message
----     ------     ----                    ----               -------
Normal   Scheduled  10m                     default-scheduler  Successfully assigned default/hello-node-connect-7d85dfc575-t4gsf to functional-918451
Normal   Pulling    7m3s (x5 over 10m)      kubelet            Pulling image "kicbase/echo-server"
Warning  Failed     7m2s (x5 over 10m)      kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Warning  Failed     7m2s (x5 over 10m)      kubelet            Error: ErrImagePull
Normal   BackOff    4m58s (x21 over 9m59s)  kubelet            Back-off pulling image "kicbase/echo-server"
Warning  Failed     4m58s (x21 over 9m59s)  kubelet            Error: ImagePullBackOff

                                                
                                                
functional_test.go:1618: (dbg) Run:  kubectl --context functional-918451 logs -l app=hello-node-connect
functional_test.go:1618: (dbg) Non-zero exit: kubectl --context functional-918451 logs -l app=hello-node-connect: exit status 1 (86.731994ms)

                                                
                                                
** stderr ** 
	Error from server (BadRequest): container "echo-server" in pod "hello-node-connect-7d85dfc575-t4gsf" is waiting to start: trying and failing to pull image

                                                
                                                
** /stderr **
functional_test.go:1620: "kubectl --context functional-918451 logs -l app=hello-node-connect" failed: exit status 1
functional_test.go:1622: hello-node logs:
functional_test.go:1624: (dbg) Run:  kubectl --context functional-918451 describe svc hello-node-connect
functional_test.go:1628: hello-node svc describe:
Name:                     hello-node-connect
Namespace:                default
Labels:                   app=hello-node-connect
Annotations:              <none>
Selector:                 app=hello-node-connect
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.102.46.233
IPs:                      10.102.46.233
Port:                     <unset>  8080/TCP
TargetPort:               8080/TCP
NodePort:                 <unset>  32532/TCP
Endpoints:                
Session Affinity:         None
External Traffic Policy:  Cluster
Internal Traffic Policy:  Cluster
Events:                   <none>
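
The empty Endpoints field above is consistent with the pod events earlier in this block: the echo-server container never pulled its image, so the pod never became Ready and was never added to the service. A minimal sketch of how one could confirm that from the same kubectl context (illustrative only, not output captured by the test):

# no addresses should be listed while the only backing pod is stuck in ImagePullBackOff
kubectl --context functional-918451 get endpointslices -l kubernetes.io/service-name=hello-node-connect
kubectl --context functional-918451 get pods -l app=hello-node-connect -o wide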
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestFunctional/parallel/ServiceCmdConnect]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======>  post-mortem[TestFunctional/parallel/ServiceCmdConnect]: docker inspect <======
helpers_test.go:239: (dbg) Run:  docker inspect functional-918451
helpers_test.go:243: (dbg) docker inspect functional-918451:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	        "Created": "2025-09-17T00:32:59.129348997Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 609481,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2025-09-17T00:32:59.19036605Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:3d6f74760dfc17060da5abc5d463d3d45b4ceea05955c9cc42b3ec56cb38cc48",
	        "ResolvConfPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hostname",
	        "HostsPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hosts",
	        "LogPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07-json.log",
	        "Name": "/functional-918451",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "functional-918451:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "functional-918451",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "ID": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	                "LowerDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207-init/diff:/var/lib/docker/overlay2/6bf7b6c5df3b8adf86744064027446440589049694f02d12745ec1de281bdb92/diff",
	                "MergedDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/merged",
	                "UpperDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/diff",
	                "WorkDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "functional-918451",
	                "Source": "/var/lib/docker/volumes/functional-918451/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "functional-918451",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8441/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "functional-918451",
	                "name.minikube.sigs.k8s.io": "functional-918451",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "b8276d0e7a4a68853a13a364899f312a03083d4747586d37196fe37821cc60ca",
	            "SandboxKey": "/var/run/docker/netns/b8276d0e7a4a",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33515"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33516"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33519"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33517"
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33518"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "functional-918451": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "0e:c4:e0:02:03:54",
	                    "DriverOpts": null,
	                    "GwPriority": 0,
	                    "NetworkID": "6a04d22b3edf0df0fed6fcef6fdf3ac9b7a09ca25aa9a4da277d50b627d3354f",
	                    "EndpointID": "29d031089f2a9002698ecc91a4339af43c25eef05a79345c2e7da185073d2f3e",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DNSNames": [
	                        "functional-918451",
	                        "6201077c0331"
	                    ]
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p functional-918451 -n functional-918451
helpers_test.go:252: <<< TestFunctional/parallel/ServiceCmdConnect FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestFunctional/parallel/ServiceCmdConnect]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 logs -n 25: (1.191634376s)
helpers_test.go:260: TestFunctional/parallel/ServiceCmdConnect logs: 
-- stdout --
	
	==> Audit <==
	┌────────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│  COMMAND   │                                                                            ARGS                                                                             │      PROFILE      │  USER   │ VERSION │     START TIME      │      END TIME       │
	├────────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ docker-env │ functional-918451 docker-env                                                                                                                                │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/578284.pem                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /usr/share/ca-certificates/578284.pem                                                                                        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/51391683.0                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/5782842.pem                                                                                                   │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /usr/share/ca-certificates/5782842.pem                                                                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/3ec20f2e.0                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image save kicbase/echo-server:functional-918451 /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/test/nested/copy/578284/hosts                                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image rm kicbase/echo-server:functional-918451 --alsologtostderr                                                                          │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image save --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh echo hello                                                                                                                            │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ ssh        │ functional-918451 ssh cat /etc/hostname                                                                                                                     │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ addons     │ functional-918451 addons list                                                                                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ addons     │ functional-918451 addons list -o json                                                                                                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	└────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:35:08
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:35:08.882126  616831 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:35:08.882236  616831 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:35:08.882240  616831 out.go:374] Setting ErrFile to fd 2...
	I0917 00:35:08.882244  616831 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:35:08.882557  616831 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:35:08.882972  616831 out.go:368] Setting JSON to false
	I0917 00:35:08.883969  616831 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11854,"bootTime":1758057455,"procs":186,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:35:08.884036  616831 start.go:140] virtualization:  
	I0917 00:35:08.887529  616831 out.go:179] * [functional-918451] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:35:08.890675  616831 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:35:08.890750  616831 notify.go:220] Checking for updates...
	I0917 00:35:08.894474  616831 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:35:08.897515  616831 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:35:08.900558  616831 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:35:08.903371  616831 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:35:08.906346  616831 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:35:08.909688  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:08.909776  616831 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:35:08.933038  616831 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:35:08.933180  616831 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:35:09.009791  616831 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:40 OomKillDisable:true NGoroutines:65 SystemTime:2025-09-17 00:35:08.994962348 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:35:09.009948  616831 docker.go:318] overlay module found
	I0917 00:35:09.013229  616831 out.go:179] * Using the docker driver based on existing profile
	I0917 00:35:09.016057  616831 start.go:304] selected driver: docker
	I0917 00:35:09.016067  616831 start.go:918] validating driver "docker" against &{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:09.016163  616831 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:35:09.016275  616831 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:35:09.071705  616831 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:40 OomKillDisable:true NGoroutines:65 SystemTime:2025-09-17 00:35:09.061677264 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:35:09.072135  616831 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:35:09.072151  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:09.072218  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:09.072260  616831 start.go:348] cluster config:
	{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocke
t: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false D
isableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:09.075512  616831 out.go:179] * Starting "functional-918451" primary control-plane node in "functional-918451" cluster
	I0917 00:35:09.078305  616831 cache.go:123] Beginning downloading kic base image for docker with docker
	I0917 00:35:09.081218  616831 out.go:179] * Pulling base image v0.0.48 ...
	I0917 00:35:09.083964  616831 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
	I0917 00:35:09.084125  616831 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:35:09.084153  616831 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
	I0917 00:35:09.084157  616831 cache.go:58] Caching tarball of preloaded images
	I0917 00:35:09.084220  616831 preload.go:172] Found /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0917 00:35:09.084228  616831 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
	I0917 00:35:09.084331  616831 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/config.json ...
	I0917 00:35:09.103367  616831 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
	I0917 00:35:09.103379  616831 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
	I0917 00:35:09.103399  616831 cache.go:232] Successfully downloaded all kic artifacts
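The cache check above pulls the pinned kicbase image only when it is absent from the local daemon. A minimal Go sketch of that probe, assuming a hypothetical helper imageInDaemon and relying on `docker image inspect` exiting non-zero for a missing image:

    // Sketch only: check whether the pinned base image is already present in the
    // local docker daemon before pulling. The image reference is copied from the
    // log; imageInDaemon is a hypothetical helper name.
    package main

    import (
        "fmt"
        "os/exec"
    )

    func imageInDaemon(ref string) bool {
        // `docker image inspect` exits non-zero when the image is absent.
        return exec.Command("docker", "image", "inspect", ref).Run() == nil
    }

    func main() {
        ref := "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1"
        if imageInDaemon(ref) {
            fmt.Println("image found in local daemon, skipping pull")
            return
        }
        fmt.Println("image missing, pulling...")
        // exec.Command("docker", "pull", ref).Run() would go here.
    }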
	I0917 00:35:09.103423  616831 start.go:360] acquireMachinesLock for functional-918451: {Name:mkead936952bd6bbe9c88b989c6af54c1d0b5ecc Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0917 00:35:09.103485  616831 start.go:364] duration metric: took 45.997µs to acquireMachinesLock for "functional-918451"
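The machines lock above is acquired with a 500ms retry delay and a 10m timeout before the existing machine is touched. A rough sketch of one way such a lock could work, using an exclusive lock file; the path and helper are illustrative, not minikube's actual mechanism:

    // Sketch of a retrying file lock (Delay:500ms Timeout:10m0s as in the log).
    package main

    import (
        "fmt"
        "os"
        "time"
    )

    func acquire(path string, delay, timeout time.Duration) (func(), error) {
        deadline := time.Now().Add(timeout)
        for {
            // O_EXCL makes creation fail if another holder already owns the file.
            f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
            if err == nil {
                f.Close()
                return func() { os.Remove(path) }, nil
            }
            if time.Now().After(deadline) {
                return nil, fmt.Errorf("timed out acquiring %s", path)
            }
            time.Sleep(delay)
        }
    }

    func main() {
        release, err := acquire("/tmp/minikube-machines.lock", 500*time.Millisecond, 10*time.Minute)
        if err != nil {
            panic(err)
        }
        defer release()
        fmt.Println("lock held; safe to inspect and update the machine")
    }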
	I0917 00:35:09.103504  616831 start.go:96] Skipping create...Using existing machine configuration
	I0917 00:35:09.103509  616831 fix.go:54] fixHost starting: 
	I0917 00:35:09.103750  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:09.119890  616831 fix.go:112] recreateIfNeeded on functional-918451: state=Running err=<nil>
	W0917 00:35:09.119909  616831 fix.go:138] unexpected machine state, will restart: <nil>
	I0917 00:35:09.123066  616831 out.go:252] * Updating the running docker "functional-918451" container ...
	I0917 00:35:09.123090  616831 machine.go:93] provisionDockerMachine start ...
	I0917 00:35:09.123171  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.141133  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.141444  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.141451  616831 main.go:141] libmachine: About to run SSH command:
	hostname
	I0917 00:35:09.279618  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-918451
	
	I0917 00:35:09.279641  616831 ubuntu.go:182] provisioning hostname "functional-918451"
	I0917 00:35:09.279702  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.297731  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.298100  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.298126  616831 main.go:141] libmachine: About to run SSH command:
	sudo hostname functional-918451 && echo "functional-918451" | sudo tee /etc/hostname
	I0917 00:35:09.452214  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-918451
	
	I0917 00:35:09.452284  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.469797  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.470111  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.470126  616831 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sfunctional-918451' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-918451/g' /etc/hosts;
				else 
					echo '127.0.1.1 functional-918451' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0917 00:35:09.608434  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: 
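The shell run over SSH above keeps /etc/hosts in sync with the new hostname: an existing 127.0.1.1 entry is rewritten, otherwise one is appended. The same idea as a small Go sketch operating on the file directly, a simplification of the grep/sed logic shown above:

    // Sketch of the /etc/hosts fixup: make sure the hostname resolves locally.
    package main

    import (
        "os"
        "regexp"
        "strings"
    )

    func main() {
        const hostname = "functional-918451"
        raw, err := os.ReadFile("/etc/hosts")
        if err != nil {
            panic(err)
        }
        if strings.Contains(string(raw), hostname) {
            return // already present, nothing to do
        }
        re := regexp.MustCompile(`(?m)^127\.0\.1\.1\s.*$`)
        var out string
        if re.Match(raw) {
            out = re.ReplaceAllString(string(raw), "127.0.1.1 "+hostname)
        } else {
            out = string(raw) + "\n127.0.1.1 " + hostname + "\n"
        }
        if err := os.WriteFile("/etc/hosts", []byte(out), 0o644); err != nil {
            panic(err)
        }
    }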
	I0917 00:35:09.608471  616831 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-576428/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-576428/.minikube}
	I0917 00:35:09.608488  616831 ubuntu.go:190] setting up certificates
	I0917 00:35:09.608497  616831 provision.go:84] configureAuth start
	I0917 00:35:09.608556  616831 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-918451
	I0917 00:35:09.628527  616831 provision.go:143] copyHostCerts
	I0917 00:35:09.628596  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem, removing ...
	I0917 00:35:09.628610  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem
	I0917 00:35:09.628684  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem (1082 bytes)
	I0917 00:35:09.628776  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem, removing ...
	I0917 00:35:09.628780  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem
	I0917 00:35:09.628803  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem (1123 bytes)
	I0917 00:35:09.628853  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem, removing ...
	I0917 00:35:09.628856  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem
	I0917 00:35:09.628879  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem (1675 bytes)
	I0917 00:35:09.628921  616831 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem org=jenkins.functional-918451 san=[127.0.0.1 192.168.49.2 functional-918451 localhost minikube]
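The server certificate generated above must be valid for every name and address the endpoint may be reached by (the san=[...] list in the log). A compact crypto/x509 sketch that issues such a certificate; it creates a throwaway CA in-process for illustration, whereas the log reuses the CA under .minikube/certs:

    // Sketch: issue a server cert whose SANs match the log
    // (127.0.0.1, 192.168.49.2, functional-918451, localhost, minikube).
    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/x509"
        "crypto/x509/pkix"
        "encoding/pem"
        "math/big"
        "net"
        "os"
        "time"
    )

    func main() {
        caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
        caTmpl := &x509.Certificate{
            SerialNumber:          big.NewInt(1),
            Subject:               pkix.Name{CommonName: "minikubeCA"},
            NotBefore:             time.Now(),
            NotAfter:              time.Now().Add(24 * time.Hour),
            IsCA:                  true,
            KeyUsage:              x509.KeyUsageCertSign,
            BasicConstraintsValid: true,
        }
        caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
        caCert, _ := x509.ParseCertificate(caDER)

        srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
        srvTmpl := &x509.Certificate{
            SerialNumber: big.NewInt(2),
            Subject:      pkix.Name{Organization: []string{"jenkins.functional-918451"}},
            NotBefore:    time.Now(),
            NotAfter:     time.Now().Add(24 * time.Hour),
            KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
            ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
            DNSNames:     []string{"functional-918451", "localhost", "minikube"},
            IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.49.2")},
        }
        srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
        pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER})
    }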
	I0917 00:35:09.918611  616831 provision.go:177] copyRemoteCerts
	I0917 00:35:09.918674  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0917 00:35:09.918710  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.935939  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.039568  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0917 00:35:10.066377  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
	I0917 00:35:10.095272  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0917 00:35:10.122538  616831 provision.go:87] duration metric: took 514.027816ms to configureAuth
	I0917 00:35:10.122556  616831 ubuntu.go:206] setting minikube options for container-runtime
	I0917 00:35:10.122762  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:10.122824  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.141296  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.141604  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.141611  616831 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0917 00:35:10.281086  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0917 00:35:10.281097  616831 ubuntu.go:71] root file system type: overlay
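The probe above runs df --output=fstype / over SSH and keeps the last line, which is "overlay" inside the kicbase container. The equivalent check as a short Go sketch, assuming GNU coreutils df (which provides --output):

    // Sketch: ask df for the filesystem type of / and keep the last token.
    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        out, err := exec.Command("df", "--output=fstype", "/").Output()
        if err != nil {
            panic(err)
        }
        // Output is a header line ("Type") followed by the value ("overlay").
        fields := strings.Fields(strings.TrimSpace(string(out)))
        fmt.Println("root filesystem type:", fields[len(fields)-1])
    }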
	I0917 00:35:10.281198  616831 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0917 00:35:10.281257  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.298083  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.298376  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.298471  616831 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0917 00:35:10.456882  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I0917 00:35:10.456955  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.475396  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.475699  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.475724  616831 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0917 00:35:10.625699  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0917 00:35:10.625713  616831 machine.go:96] duration metric: took 1.50261688s to provisionDockerMachine
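The unit update just completed follows a simple idempotent pattern: render docker.service.new, diff it against the installed unit, and only move it into place and restart the daemon when the two differ. A Go sketch of that sequence, to be run as root to match the sudo-prefixed commands in the log:

    // Sketch of the conditional docker.service replacement shown above.
    package main

    import (
        "log"
        "os/exec"
    )

    func run(name string, args ...string) error {
        cmd := exec.Command(name, args...)
        cmd.Stdout, cmd.Stderr = log.Writer(), log.Writer()
        return cmd.Run()
    }

    func main() {
        const cur, next = "/lib/systemd/system/docker.service", "/lib/systemd/system/docker.service.new"
        // diff exits 0 when the files are identical, so nothing needs restarting.
        if run("diff", "-u", cur, next) == nil {
            log.Println("docker.service unchanged, skipping restart")
            return
        }
        for _, step := range [][]string{
            {"mv", next, cur},
            {"systemctl", "daemon-reload"},
            {"systemctl", "enable", "docker"},
            {"systemctl", "restart", "docker"},
        } {
            if err := run(step[0], step[1:]...); err != nil {
                log.Fatalf("%v failed: %v", step, err)
            }
        }
    }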
	I0917 00:35:10.625722  616831 start.go:293] postStartSetup for "functional-918451" (driver="docker")
	I0917 00:35:10.625731  616831 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0917 00:35:10.625799  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0917 00:35:10.625835  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.643208  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.741407  616831 ssh_runner.go:195] Run: cat /etc/os-release
	I0917 00:35:10.744475  616831 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0917 00:35:10.744498  616831 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0917 00:35:10.744506  616831 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0917 00:35:10.744512  616831 info.go:137] Remote host: Ubuntu 22.04.5 LTS
	I0917 00:35:10.744522  616831 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/addons for local assets ...
	I0917 00:35:10.744575  616831 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/files for local assets ...
	I0917 00:35:10.744660  616831 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem -> 5782842.pem in /etc/ssl/certs
	I0917 00:35:10.744735  616831 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/test/nested/copy/578284/hosts -> hosts in /etc/test/nested/copy/578284
	I0917 00:35:10.744781  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/578284
	I0917 00:35:10.753363  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem --> /etc/ssl/certs/5782842.pem (1708 bytes)
	I0917 00:35:10.777494  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/test/nested/copy/578284/hosts --> /etc/test/nested/copy/578284/hosts (40 bytes)
	I0917 00:35:10.808587  616831 start.go:296] duration metric: took 182.835375ms for postStartSetup
	I0917 00:35:10.808661  616831 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:35:10.808710  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.825228  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.921615  616831 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0917 00:35:10.926439  616831 fix.go:56] duration metric: took 1.822916967s for fixHost
	I0917 00:35:10.926455  616831 start.go:83] releasing machines lock for "functional-918451", held for 1.822962069s
	I0917 00:35:10.926523  616831 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-918451
	I0917 00:35:10.944280  616831 ssh_runner.go:195] Run: cat /version.json
	I0917 00:35:10.944302  616831 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0917 00:35:10.944320  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.944363  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.966713  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.978091  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:11.188990  616831 ssh_runner.go:195] Run: systemctl --version
	I0917 00:35:11.193766  616831 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0917 00:35:11.198183  616831 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0917 00:35:11.219806  616831 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0917 00:35:11.219874  616831 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0917 00:35:11.228951  616831 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
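The loopback patch above uses sed to guarantee the CNI loopback config carries a "name" field and a cniVersion the runtime accepts. The same fix expressed as a Go sketch over the parsed JSON; the file name used here is hypothetical, since the log globs for *loopback.conf*:

    // Sketch: ensure the loopback CNI config has a name and cniVersion 1.0.0.
    package main

    import (
        "encoding/json"
        "log"
        "os"
    )

    func main() {
        const path = "/etc/cni/net.d/200-loopback.conf" // hypothetical file name
        raw, err := os.ReadFile(path)
        if err != nil {
            log.Fatal(err)
        }
        var conf map[string]any
        if err := json.Unmarshal(raw, &conf); err != nil {
            log.Fatal(err)
        }
        if _, ok := conf["name"]; !ok {
            conf["name"] = "loopback"
        }
        conf["cniVersion"] = "1.0.0"
        patched, _ := json.MarshalIndent(conf, "", "  ")
        if err := os.WriteFile(path, patched, 0o644); err != nil {
            log.Fatal(err)
        }
    }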
	I0917 00:35:11.228967  616831 start.go:495] detecting cgroup driver to use...
	I0917 00:35:11.228997  616831 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:35:11.229092  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:35:11.247155  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I0917 00:35:11.258611  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0917 00:35:11.270113  616831 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0917 00:35:11.270169  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0917 00:35:11.280500  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:35:11.291337  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0917 00:35:11.305281  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:35:11.316659  616831 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0917 00:35:11.326609  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0917 00:35:11.339061  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0917 00:35:11.350191  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0917 00:35:11.361784  616831 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0917 00:35:11.371029  616831 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0917 00:35:11.380076  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:11.491379  616831 ssh_runner.go:195] Run: sudo systemctl restart containerd
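Because the host cgroup driver was detected as "cgroupfs", the sed at 00:35:11.270169 forces SystemdCgroup = false in containerd's config before the restart above. A Go sketch of that single rewrite using a multiline regexp, mirroring the sed expression:

    // Sketch: align containerd with the cgroupfs driver detected on the host.
    package main

    import (
        "log"
        "os"
        "regexp"
    )

    func main() {
        const path = "/etc/containerd/config.toml"
        raw, err := os.ReadFile(path)
        if err != nil {
            log.Fatal(err)
        }
        re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
        out := re.ReplaceAll(raw, []byte("${1}SystemdCgroup = false"))
        if err := os.WriteFile(path, out, 0o644); err != nil {
            log.Fatal(err)
        }
    }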
	I0917 00:35:11.707140  616831 start.go:495] detecting cgroup driver to use...
	I0917 00:35:11.707176  616831 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:35:11.707228  616831 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0917 00:35:11.722959  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:35:11.735963  616831 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0917 00:35:11.777124  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:35:11.790691  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0917 00:35:11.809931  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:35:11.827868  616831 ssh_runner.go:195] Run: which cri-dockerd
	I0917 00:35:11.832090  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0917 00:35:11.842322  616831 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I0917 00:35:11.861532  616831 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0917 00:35:11.980450  616831 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0917 00:35:12.101702  616831 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I0917 00:35:12.101793  616831 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0917 00:35:12.126257  616831 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I0917 00:35:12.138069  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:12.258999  616831 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0917 00:35:38.407410  616831 ssh_runner.go:235] Completed: sudo systemctl restart docker: (26.148387762s)
	I0917 00:35:38.407470  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0917 00:35:38.434609  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0917 00:35:38.463774  616831 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
	I0917 00:35:38.484708  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:35:38.496143  616831 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0917 00:35:38.594906  616831 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0917 00:35:38.689669  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:38.785711  616831 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0917 00:35:38.799399  616831 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I0917 00:35:38.811558  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:38.907605  616831 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I0917 00:35:38.991545  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:35:39.006514  616831 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0917 00:35:39.006588  616831 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0917 00:35:39.012465  616831 start.go:563] Will wait 60s for crictl version
	I0917 00:35:39.012526  616831 ssh_runner.go:195] Run: which crictl
	I0917 00:35:39.016162  616831 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0917 00:35:39.058036  616831 start.go:579] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.4.0
	RuntimeApiVersion:  v1
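The 60s waits noted at 00:35:39.006514 and 00:35:39.012465 simply poll until /var/run/cri-dockerd.sock exists and crictl responds. A small Go sketch of the socket-path wait, assuming plain polling with a fixed 500ms interval:

    // Sketch: wait up to 60s for the cri-dockerd socket to appear.
    package main

    import (
        "fmt"
        "os"
        "time"
    )

    func waitForPath(path string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if _, err := os.Stat(path); err == nil {
                return nil
            }
            time.Sleep(500 * time.Millisecond)
        }
        return fmt.Errorf("timed out waiting for %s", path)
    }

    func main() {
        if err := waitForPath("/var/run/cri-dockerd.sock", 60*time.Second); err != nil {
            fmt.Println(err)
            os.Exit(1)
        }
        fmt.Println("cri-dockerd socket is up")
    }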
	I0917 00:35:39.058095  616831 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:35:39.079756  616831 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:35:39.106648  616831 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
	I0917 00:35:39.106725  616831 cli_runner.go:164] Run: docker network inspect functional-918451 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0917 00:35:39.122340  616831 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0917 00:35:39.130638  616831 out.go:179]   - apiserver.enable-admission-plugins=NamespaceAutoProvision
	I0917 00:35:39.133387  616831 kubeadm.go:875] updating cluster {Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServer
IPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker
BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0917 00:35:39.133516  616831 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:35:39.133606  616831 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:35:39.152835  616831 docker.go:691] Got preloaded images: -- stdout --
	minikube-local-cache-test:functional-918451
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	registry.k8s.io/pause:3.3
	registry.k8s.io/pause:3.1
	registry.k8s.io/pause:latest
	
	-- /stdout --
	I0917 00:35:39.152848  616831 docker.go:621] Images already preloaded, skipping extraction
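The docker images listings above confirm that every image required for Kubernetes v1.34.0 is already in the daemon, so the preload tarball is not extracted again. A Go sketch of that comparison, with the expected list copied from the log output:

    // Sketch: verify the expected kubeadm images are already present locally.
    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
        if err != nil {
            panic(err)
        }
        have := map[string]bool{}
        for _, img := range strings.Fields(string(out)) {
            have[img] = true
        }
        expected := []string{
            "registry.k8s.io/kube-apiserver:v1.34.0",
            "registry.k8s.io/kube-controller-manager:v1.34.0",
            "registry.k8s.io/kube-scheduler:v1.34.0",
            "registry.k8s.io/kube-proxy:v1.34.0",
            "registry.k8s.io/etcd:3.6.4-0",
            "registry.k8s.io/coredns/coredns:v1.12.1",
            "registry.k8s.io/pause:3.10.1",
            "gcr.io/k8s-minikube/storage-provisioner:v5",
        }
        missing := 0
        for _, img := range expected {
            if !have[img] {
                fmt.Println("missing:", img)
                missing++
            }
        }
        if missing == 0 {
            fmt.Println("images already preloaded, skipping load")
        }
    }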
	I0917 00:35:39.152916  616831 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:35:39.172196  616831 docker.go:691] Got preloaded images: -- stdout --
	minikube-local-cache-test:functional-918451
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	registry.k8s.io/pause:3.3
	registry.k8s.io/pause:3.1
	registry.k8s.io/pause:latest
	
	-- /stdout --
	I0917 00:35:39.172209  616831 cache_images.go:85] Images are preloaded, skipping loading
	I0917 00:35:39.172217  616831 kubeadm.go:926] updating node { 192.168.49.2 8441 v1.34.0 docker true true} ...
	I0917 00:35:39.172329  616831 kubeadm.go:938] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-918451 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0917 00:35:39.172397  616831 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0917 00:35:39.219006  616831 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
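The override recorded above replaces the default admission-plugin list with the user-supplied NamespaceAutoProvision value for the apiserver component before the kubeadm config is rendered. A tiny sketch of the merge semantics; the mergeExtraArgs helper is hypothetical, and user-provided keys simply win:

    // Sketch: user-supplied extra-config values override per-component defaults.
    package main

    import "fmt"

    func mergeExtraArgs(defaults, user map[string]string) map[string]string {
        merged := map[string]string{}
        for k, v := range defaults {
            merged[k] = v
        }
        for k, v := range user {
            merged[k] = v // user-provided values win
        }
        return merged
    }

    func main() {
        defaults := map[string]string{
            "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
        }
        user := map[string]string{"enable-admission-plugins": "NamespaceAutoProvision"}
        fmt.Println(mergeExtraArgs(defaults, user))
    }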
	I0917 00:35:39.219077  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:39.219095  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:39.219102  616831 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0917 00:35:39.219123  616831 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-918451 NodeName:functional-918451 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:
map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0917 00:35:39.219247  616831 kubeadm.go:195] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8441
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "functional-918451"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.49.2"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceAutoProvision"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8441
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0917 00:35:39.219315  616831 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
	I0917 00:35:39.228223  616831 binaries.go:44] Found k8s binaries, skipping transfer
	I0917 00:35:39.228283  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0917 00:35:39.237303  616831 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (316 bytes)
	I0917 00:35:39.256906  616831 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0917 00:35:39.274945  616831 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2068 bytes)
	I0917 00:35:39.293022  616831 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0917 00:35:39.296647  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:39.393067  616831 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:35:39.405322  616831 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451 for IP: 192.168.49.2
	I0917 00:35:39.405335  616831 certs.go:194] generating shared ca certs ...
	I0917 00:35:39.405349  616831 certs.go:226] acquiring lock for ca certs: {Name:mk04b183dabeee5957951eb115c646a018da171d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:39.405482  616831 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key
	I0917 00:35:39.405519  616831 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key
	I0917 00:35:39.405525  616831 certs.go:256] generating profile certs ...
	I0917 00:35:39.405604  616831 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.key
	I0917 00:35:39.405647  616831 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.key.052cde45
	I0917 00:35:39.405679  616831 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.key
	I0917 00:35:39.405780  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284.pem (1338 bytes)
	W0917 00:35:39.405806  616831 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284_empty.pem, impossibly tiny 0 bytes
	I0917 00:35:39.405813  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem (1671 bytes)
	I0917 00:35:39.405835  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem (1082 bytes)
	I0917 00:35:39.405875  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem (1123 bytes)
	I0917 00:35:39.405896  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem (1675 bytes)
	I0917 00:35:39.405934  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem (1708 bytes)
	I0917 00:35:39.406508  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0917 00:35:39.430565  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0917 00:35:39.455020  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0917 00:35:39.478799  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I0917 00:35:39.506545  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
	I0917 00:35:39.542615  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I0917 00:35:39.575447  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0917 00:35:39.627854  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0917 00:35:39.680326  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0917 00:35:39.731613  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284.pem --> /usr/share/ca-certificates/578284.pem (1338 bytes)
	I0917 00:35:39.800308  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem --> /usr/share/ca-certificates/5782842.pem (1708 bytes)
	I0917 00:35:39.863245  616831 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0917 00:35:39.886427  616831 ssh_runner.go:195] Run: openssl version
	I0917 00:35:39.892218  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/5782842.pem && ln -fs /usr/share/ca-certificates/5782842.pem /etc/ssl/certs/5782842.pem"
	I0917 00:35:39.909376  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.920250  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 17 00:32 /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.920315  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.933447  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/5782842.pem /etc/ssl/certs/3ec20f2e.0"
	I0917 00:35:39.951211  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0917 00:35:39.975815  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.988600  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 17 00:21 /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.988656  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.999233  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0917 00:35:40.010900  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/578284.pem && ln -fs /usr/share/ca-certificates/578284.pem /etc/ssl/certs/578284.pem"
	I0917 00:35:40.027029  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.032099  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 17 00:32 /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.032177  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.048270  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/578284.pem /etc/ssl/certs/51391683.0"
	I0917 00:35:40.071955  616831 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0917 00:35:40.079346  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I0917 00:35:40.094668  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I0917 00:35:40.106687  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I0917 00:35:40.116505  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I0917 00:35:40.124315  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I0917 00:35:40.131800  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
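The openssl -checkend 86400 runs above ask whether each control-plane certificate expires within the next 24 hours; any that do would be regenerated. The same check in Go with crypto/x509, for one of the paths probed above:

    // Sketch: flag a PEM certificate that expires within 24 hours.
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"
        "os"
        "time"
    )

    func main() {
        raw, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
        if err != nil {
            log.Fatal(err)
        }
        block, _ := pem.Decode(raw)
        if block == nil {
            log.Fatal("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        if time.Until(cert.NotAfter) < 24*time.Hour {
            fmt.Println("certificate expires within 24h, regeneration needed")
        } else {
            fmt.Println("certificate valid beyond 24h")
        }
    }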
	I0917 00:35:40.139052  616831 kubeadm.go:392] StartCluster: {Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs
:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker Bin
aryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:40.139197  616831 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0917 00:35:40.197057  616831 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0917 00:35:40.224036  616831 kubeadm.go:408] found existing configuration files, will attempt cluster restart
	I0917 00:35:40.224045  616831 kubeadm.go:589] restartPrimaryControlPlane start ...
	I0917 00:35:40.224108  616831 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I0917 00:35:40.234943  616831 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.235576  616831 kubeconfig.go:125] found "functional-918451" server: "https://192.168.49.2:8441"
	I0917 00:35:40.237322  616831 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I0917 00:35:40.255950  616831 kubeadm.go:636] detected kubeadm config drift (will reconfigure cluster from new /var/tmp/minikube/kubeadm.yaml):
	-- stdout --
	--- /var/tmp/minikube/kubeadm.yaml	2025-09-17 00:33:07.200319649 +0000
	+++ /var/tmp/minikube/kubeadm.yaml.new	2025-09-17 00:35:39.284623484 +0000
	@@ -24,7 +24,7 @@
	   certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	   extraArgs:
	     - name: "enable-admission-plugins"
	-      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	+      value: "NamespaceAutoProvision"
	 controllerManager:
	   extraArgs:
	     - name: "allocate-node-cidrs"
	
	-- /stdout --
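Drift detection above is nothing more than diff -u between the kubeadm config on disk and the freshly rendered one; exit code 1 (files differ) triggers the cluster reconfiguration that follows. A Go sketch of that decision:

    // Sketch: detect kubeadm config drift from diff's exit status.
    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("diff", "-u",
            "/var/tmp/minikube/kubeadm.yaml",
            "/var/tmp/minikube/kubeadm.yaml.new")
        out, err := cmd.CombinedOutput()
        if err == nil {
            fmt.Println("kubeadm config unchanged")
            return
        }
        if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
            fmt.Println("kubeadm config drift detected:")
            fmt.Print(string(out))
            return
        }
        panic(err) // exit code >1 means diff itself failed
    }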
	I0917 00:35:40.255960  616831 kubeadm.go:1152] stopping kube-system containers ...
	I0917 00:35:40.256030  616831 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0917 00:35:40.285051  616831 docker.go:484] Stopping containers: [e3a6d89ad5b6 e707505e3be8 3c0a229533b1 7a2c5910368f 56ab6c32da13 ae4004a0e0f9 3c026c8adcb2 ca82151f5d51 d3d4055f8ecd c07995bc710c bd4e50a8edbd 0dfa5a327ca4 4654ac6f884c 30225879638d 17077358dad7 ecc60cd02b8d e00e50d78d69 aeaa29ffb123 3b3c0e6a4d32 6d1c97d32588 8b0ba516c25d 0700097d1bcb 61f808cbe0f6 120f09d2a6bd 010fc9b02644 d163b7d82097 139117743d86 27be5cee6745 647b3f98f673 ac639e371291]
	I0917 00:35:40.285138  616831 ssh_runner.go:195] Run: docker stop e3a6d89ad5b6 e707505e3be8 3c0a229533b1 7a2c5910368f 56ab6c32da13 ae4004a0e0f9 3c026c8adcb2 ca82151f5d51 d3d4055f8ecd c07995bc710c bd4e50a8edbd 0dfa5a327ca4 4654ac6f884c 30225879638d 17077358dad7 ecc60cd02b8d e00e50d78d69 aeaa29ffb123 3b3c0e6a4d32 6d1c97d32588 8b0ba516c25d 0700097d1bcb 61f808cbe0f6 120f09d2a6bd 010fc9b02644 d163b7d82097 139117743d86 27be5cee6745 647b3f98f673 ac639e371291
	I0917 00:35:40.643651  616831 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I0917 00:35:40.780219  616831 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0917 00:35:40.789089  616831 kubeadm.go:157] found existing configuration files:
	-rw------- 1 root root 5631 Sep 17 00:33 /etc/kubernetes/admin.conf
	-rw------- 1 root root 5640 Sep 17 00:33 /etc/kubernetes/controller-manager.conf
	-rw------- 1 root root 1972 Sep 17 00:33 /etc/kubernetes/kubelet.conf
	-rw------- 1 root root 5588 Sep 17 00:33 /etc/kubernetes/scheduler.conf
	
	I0917 00:35:40.789169  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
	I0917 00:35:40.797724  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
	I0917 00:35:40.806187  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.806255  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0917 00:35:40.814650  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
	I0917 00:35:40.822929  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.822984  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0917 00:35:40.831609  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
	I0917 00:35:40.839866  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.839924  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0917 00:35:40.848095  616831 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0917 00:35:40.856862  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:40.903113  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.491648  616831 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (2.588511393s)
	I0917 00:35:43.491665  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.659254  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.739072  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.829895  616831 api_server.go:52] waiting for apiserver process to appear ...
	I0917 00:35:43.829967  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:44.330684  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:44.830547  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:45.330880  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:45.373119  616831 api_server.go:72] duration metric: took 1.543238975s to wait for apiserver process to appear ...
	I0917 00:35:45.373135  616831 api_server.go:88] waiting for apiserver healthz status ...
	I0917 00:35:45.373162  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.208165  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W0917 00:35:49.208181  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I0917 00:35:49.208194  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.270648  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W0917 00:35:49.270665  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I0917 00:35:49.373926  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.382729  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:49.382744  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:49.873319  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.884187  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:49.884206  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:50.373776  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:50.384331  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:50.384350  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:50.873627  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:50.882213  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I0917 00:35:50.895687  616831 api_server.go:141] control plane version: v1.34.0
	I0917 00:35:50.895703  616831 api_server.go:131] duration metric: took 5.522561671s to wait for apiserver health ...
	I0917 00:35:50.895711  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:50.895720  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:50.899145  616831 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I0917 00:35:50.902037  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0917 00:35:50.911370  616831 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I0917 00:35:50.933173  616831 system_pods.go:43] waiting for kube-system pods to appear ...
	I0917 00:35:50.936624  616831 system_pods.go:59] 7 kube-system pods found
	I0917 00:35:50.936650  616831 system_pods.go:61] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:50.936657  616831 system_pods.go:61] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:50.936665  616831 system_pods.go:61] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:50.936671  616831 system_pods.go:61] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:50.936676  616831 system_pods.go:61] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:50.936682  616831 system_pods.go:61] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:50.936687  616831 system_pods.go:61] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:50.936691  616831 system_pods.go:74] duration metric: took 3.508165ms to wait for pod list to return data ...
	I0917 00:35:50.936698  616831 node_conditions.go:102] verifying NodePressure condition ...
	I0917 00:35:50.939626  616831 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0917 00:35:50.939645  616831 node_conditions.go:123] node cpu capacity is 2
	I0917 00:35:50.939655  616831 node_conditions.go:105] duration metric: took 2.952864ms to run NodePressure ...
	I0917 00:35:50.939671  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:51.197134  616831 kubeadm.go:720] waiting for restarted kubelet to initialise ...
	I0917 00:35:51.200827  616831 kubeadm.go:735] kubelet initialised
	I0917 00:35:51.200839  616831 kubeadm.go:736] duration metric: took 3.691915ms waiting for restarted kubelet to initialise ...
	I0917 00:35:51.200853  616831 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0917 00:35:51.209223  616831 ops.go:34] apiserver oom_adj: -16
	I0917 00:35:51.209235  616831 kubeadm.go:593] duration metric: took 10.985184815s to restartPrimaryControlPlane
	I0917 00:35:51.209243  616831 kubeadm.go:394] duration metric: took 11.070201087s to StartCluster
	I0917 00:35:51.209257  616831 settings.go:142] acquiring lock: {Name:mkeeff7458e530a541c151580b54d47f2e77f0de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:51.209311  616831 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:35:51.209950  616831 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/kubeconfig: {Name:mk3b9e4b05730cfa71613487e1675bc90b668ce8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:51.210162  616831 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0917 00:35:51.210539  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:51.210523  616831 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I0917 00:35:51.210595  616831 addons.go:69] Setting default-storageclass=true in profile "functional-918451"
	I0917 00:35:51.210594  616831 addons.go:69] Setting storage-provisioner=true in profile "functional-918451"
	I0917 00:35:51.210606  616831 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-918451"
	I0917 00:35:51.210607  616831 addons.go:238] Setting addon storage-provisioner=true in "functional-918451"
	W0917 00:35:51.210612  616831 addons.go:247] addon storage-provisioner should already be in state true
	I0917 00:35:51.210636  616831 host.go:66] Checking if "functional-918451" exists ...
	I0917 00:35:51.210902  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.211036  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.213522  616831 out.go:179] * Verifying Kubernetes components...
	I0917 00:35:51.216759  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:51.243427  616831 addons.go:238] Setting addon default-storageclass=true in "functional-918451"
	W0917 00:35:51.243437  616831 addons.go:247] addon default-storageclass should already be in state true
	I0917 00:35:51.243461  616831 host.go:66] Checking if "functional-918451" exists ...
	I0917 00:35:51.243855  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.254231  616831 out.go:179]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0917 00:35:51.257133  616831 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:35:51.257149  616831 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0917 00:35:51.257213  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:51.279854  616831 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
	I0917 00:35:51.279867  616831 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0917 00:35:51.279941  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:51.293176  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:51.310104  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:51.443085  616831 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:35:51.483844  616831 node_ready.go:35] waiting up to 6m0s for node "functional-918451" to be "Ready" ...
	I0917 00:35:51.490640  616831 node_ready.go:49] node "functional-918451" is "Ready"
	I0917 00:35:51.490655  616831 node_ready.go:38] duration metric: took 6.792097ms for node "functional-918451" to be "Ready" ...
	I0917 00:35:51.490669  616831 api_server.go:52] waiting for apiserver process to appear ...
	I0917 00:35:51.490723  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:51.499047  616831 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0917 00:35:51.527212  616831 api_server.go:72] duration metric: took 317.021453ms to wait for apiserver process to appear ...
	I0917 00:35:51.527239  616831 api_server.go:88] waiting for apiserver healthz status ...
	I0917 00:35:51.527257  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:51.537261  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I0917 00:35:51.540349  616831 api_server.go:141] control plane version: v1.34.0
	I0917 00:35:51.540365  616831 api_server.go:131] duration metric: took 13.120715ms to wait for apiserver health ...
	I0917 00:35:51.540373  616831 system_pods.go:43] waiting for kube-system pods to appear ...
	I0917 00:35:51.546242  616831 system_pods.go:59] 7 kube-system pods found
	I0917 00:35:51.546261  616831 system_pods.go:61] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.546268  616831 system_pods.go:61] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.546276  616831 system_pods.go:61] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.546281  616831 system_pods.go:61] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.546303  616831 system_pods.go:61] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:51.546309  616831 system_pods.go:61] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.546315  616831 system_pods.go:61] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:51.546320  616831 system_pods.go:74] duration metric: took 5.942723ms to wait for pod list to return data ...
	I0917 00:35:51.546328  616831 default_sa.go:34] waiting for default service account to be created ...
	I0917 00:35:51.548754  616831 default_sa.go:45] found service account: "default"
	I0917 00:35:51.548767  616831 default_sa.go:55] duration metric: took 2.434427ms for default service account to be created ...
	I0917 00:35:51.548774  616831 system_pods.go:116] waiting for k8s-apps to be running ...
	I0917 00:35:51.556653  616831 system_pods.go:86] 7 kube-system pods found
	I0917 00:35:51.556671  616831 system_pods.go:89] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.556689  616831 system_pods.go:89] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.556696  616831 system_pods.go:89] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.556701  616831 system_pods.go:89] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.556707  616831 system_pods.go:89] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:51.556712  616831 system_pods.go:89] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.556717  616831 system_pods.go:89] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:51.556741  616831 retry.go:31] will retry after 289.97205ms: missing components: kube-proxy
	I0917 00:35:51.587796  616831 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:35:51.851992  616831 system_pods.go:86] 7 kube-system pods found
	I0917 00:35:51.852011  616831 system_pods.go:89] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.852018  616831 system_pods.go:89] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.852032  616831 system_pods.go:89] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.852038  616831 system_pods.go:89] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.852041  616831 system_pods.go:89] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Running
	I0917 00:35:51.852046  616831 system_pods.go:89] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.852049  616831 system_pods.go:89] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running
	I0917 00:35:51.852056  616831 system_pods.go:126] duration metric: took 303.277418ms to wait for k8s-apps to be running ...
	I0917 00:35:51.852071  616831 system_svc.go:44] waiting for kubelet service to be running ....
	I0917 00:35:51.852127  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:35:52.405444  616831 system_svc.go:56] duration metric: took 553.36513ms WaitForService to wait for kubelet
	I0917 00:35:52.405458  616831 kubeadm.go:578] duration metric: took 1.195275324s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:35:52.405474  616831 node_conditions.go:102] verifying NodePressure condition ...
	I0917 00:35:52.407851  616831 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0917 00:35:52.407865  616831 node_conditions.go:123] node cpu capacity is 2
	I0917 00:35:52.407875  616831 node_conditions.go:105] duration metric: took 2.396299ms to run NodePressure ...
	I0917 00:35:52.407885  616831 start.go:241] waiting for startup goroutines ...
	I0917 00:35:52.408987  616831 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
	I0917 00:35:52.411837  616831 addons.go:514] duration metric: took 1.201321093s for enable addons: enabled=[default-storageclass storage-provisioner]
	I0917 00:35:52.411870  616831 start.go:246] waiting for cluster config update ...
	I0917 00:35:52.411882  616831 start.go:255] writing updated cluster config ...
	I0917 00:35:52.412173  616831 ssh_runner.go:195] Run: rm -f paused
	I0917 00:35:52.415576  616831 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:35:52.418997  616831 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-q6x4w" in "kube-system" namespace to be "Ready" or be gone ...
	W0917 00:35:54.424244  616831 pod_ready.go:104] pod "coredns-66bc5c9577-q6x4w" is not "Ready", error: <nil>
	I0917 00:35:56.942816  616831 pod_ready.go:94] pod "coredns-66bc5c9577-q6x4w" is "Ready"
	I0917 00:35:56.942832  616831 pod_ready.go:86] duration metric: took 4.52382257s for pod "coredns-66bc5c9577-q6x4w" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:35:56.950902  616831 pod_ready.go:83] waiting for pod "etcd-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	W0917 00:35:58.956638  616831 pod_ready.go:104] pod "etcd-functional-918451" is not "Ready", error: <nil>
	I0917 00:36:00.957005  616831 pod_ready.go:94] pod "etcd-functional-918451" is "Ready"
	I0917 00:36:00.957018  616831 pod_ready.go:86] duration metric: took 4.006102595s for pod "etcd-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.959116  616831 pod_ready.go:83] waiting for pod "kube-apiserver-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.962869  616831 pod_ready.go:94] pod "kube-apiserver-functional-918451" is "Ready"
	I0917 00:36:00.962882  616831 pod_ready.go:86] duration metric: took 3.751293ms for pod "kube-apiserver-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.964932  616831 pod_ready.go:83] waiting for pod "kube-controller-manager-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.968976  616831 pod_ready.go:94] pod "kube-controller-manager-functional-918451" is "Ready"
	I0917 00:36:00.968989  616831 pod_ready.go:86] duration metric: took 4.046539ms for pod "kube-controller-manager-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.971150  616831 pod_ready.go:83] waiting for pod "kube-proxy-q4hcq" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.155524  616831 pod_ready.go:94] pod "kube-proxy-q4hcq" is "Ready"
	I0917 00:36:01.155540  616831 pod_ready.go:86] duration metric: took 184.378745ms for pod "kube-proxy-q4hcq" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.355133  616831 pod_ready.go:83] waiting for pod "kube-scheduler-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.754304  616831 pod_ready.go:94] pod "kube-scheduler-functional-918451" is "Ready"
	I0917 00:36:01.754318  616831 pod_ready.go:86] duration metric: took 399.17189ms for pod "kube-scheduler-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.754329  616831 pod_ready.go:40] duration metric: took 9.3387321s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:36:01.810606  616831 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
	I0917 00:36:01.813975  616831 out.go:179] * Done! kubectl is now configured to use "functional-918451" cluster and "default" namespace by default
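	Note: the control-plane restart logged above was driven by the apiserver extraArgs drift shown in the kubeadm.yaml diff (enable-admission-plugins changed to NamespaceAutoProvision). As a rough, hedged sketch of reproducing a run with that admission-plugin override locally (the profile name and driver mirror this log; the exact flags are an assumption, not the test harness's actual invocation):

	    # assumed reproduction sketch, not the CI invocation
	    minikube start -p functional-918451 \
	      --driver=docker --container-runtime=docker \
	      --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision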
	
	
	==> Docker <==
	Sep 17 00:36:27 functional-918451 dockerd[6835]: time="2025-09-17T00:36:27.820598487Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:37 functional-918451 dockerd[6835]: time="2025-09-17T00:36:37.016993757Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:41 functional-918451 dockerd[6835]: time="2025-09-17T00:36:41.014364003Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:53 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:53Z" level=error msg="error getting RW layer size for container ID '17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c': Error response from daemon: No such container: 17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c"
	Sep 17 00:36:53 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:53Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c'"
	Sep 17 00:37:05 functional-918451 dockerd[6835]: time="2025-09-17T00:37:05.036223709Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:07 functional-918451 dockerd[6835]: time="2025-09-17T00:37:07.012067849Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:59 functional-918451 dockerd[6835]: time="2025-09-17T00:37:59.123624420Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:59 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:37:59Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
	Sep 17 00:38:00 functional-918451 dockerd[6835]: time="2025-09-17T00:38:00.037097732Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:25 functional-918451 dockerd[6835]: time="2025-09-17T00:39:25.034129019Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:31 functional-918451 dockerd[6835]: time="2025-09-17T00:39:31.549684585Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:31 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:39:31Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
	Sep 17 00:40:26 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:40:26Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a2e900f93cd36cdef9d88b5b02a9cc87992e7a7806d060e0ad84e38cbc6a43db/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:40:27 functional-918451 dockerd[6835]: time="2025-09-17T00:40:27.246297920Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:40:40 functional-918451 dockerd[6835]: time="2025-09-17T00:40:40.023255824Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:41:06 functional-918451 dockerd[6835]: time="2025-09-17T00:41:06.999363441Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:41:59 functional-918451 dockerd[6835]: time="2025-09-17T00:41:59.086100550Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:41:59 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:41:59Z" level=info msg="Stop pulling image kicbase/echo-server:latest: latest: Pulling from kicbase/echo-server"
	Sep 17 00:42:10 functional-918451 dockerd[6835]: time="2025-09-17T00:42:10.017732212Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:42:21 functional-918451 dockerd[6835]: time="2025-09-17T00:42:21.015131499Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:43:33 functional-918451 dockerd[6835]: time="2025-09-17T00:43:33.137945993Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:43:33 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:43:33Z" level=info msg="Stop pulling image kicbase/echo-server:latest: latest: Pulling from kicbase/echo-server"
	Sep 17 00:46:20 functional-918451 dockerd[6835]: time="2025-09-17T00:46:20.157384418Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:46:20 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:46:20Z" level=info msg="Stop pulling image kicbase/echo-server:latest: latest: Pulling from kicbase/echo-server"
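	The repeated "toomanyrequests" failures above show Docker Hub's unauthenticated pull rate limit blocking the docker.io/nginx and kicbase/echo-server pulls that the test pods depend on. One possible local workaround (an assumption about how a developer might sidestep this, not what the CI job does) is to pull on a host with pull quota remaining and side-load the images into the node so the kubelet never hits the registry:

	    # hedged sketch: pre-load the images that the log shows failing to pull
	    docker pull nginx:latest
	    docker pull kicbase/echo-server:latest
	    minikube -p functional-918451 image load nginx:latest
	    minikube -p functional-918451 image load kicbase/echo-server:latest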
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                           CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	7f05a20c84773       nginx@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8   10 minutes ago      Running             nginx                     0                   226891f84e087       nginx-svc
	bf9dde3577a17       138784d87c9c5                                                                   10 minutes ago      Running             coredns                   2                   5a0f0f9b9bec0       coredns-66bc5c9577-q6x4w
	2249e1b1919d7       6fc32d66c1411                                                                   10 minutes ago      Running             kube-proxy                3                   c70143b4207a5       kube-proxy-q4hcq
	cf11e0afbf55d       ba04bb24b9575                                                                   10 minutes ago      Running             storage-provisioner       3                   9bd802eae08c9       storage-provisioner
	8f45cd3852343       a25f5ef9c34c3                                                                   10 minutes ago      Running             kube-scheduler            3                   adb05901f064d       kube-scheduler-functional-918451
	810186cb500fd       d291939e99406                                                                   10 minutes ago      Running             kube-apiserver            0                   edfa6965041a5       kube-apiserver-functional-918451
	53d0462ffd2c6       a1894772a478e                                                                   10 minutes ago      Running             etcd                      2                   299a608c07fcf       etcd-functional-918451
	a800de00866e1       996be7e86d9b3                                                                   10 minutes ago      Running             kube-controller-manager   3                   85599d5f0bdd8       kube-controller-manager-functional-918451
	bc1e5b3192079       6fc32d66c1411                                                                   10 minutes ago      Created             kube-proxy                2                   3c0a229533b1f       kube-proxy-q4hcq
	7cda0e892061a       996be7e86d9b3                                                                   10 minutes ago      Created             kube-controller-manager   2                   7a2c5910368f6       kube-controller-manager-functional-918451
	e3a6d89ad5b64       a25f5ef9c34c3                                                                   10 minutes ago      Created             kube-scheduler            2                   ae4004a0e0f99       kube-scheduler-functional-918451
	e707505e3be85       ba04bb24b9575                                                                   10 minutes ago      Exited              storage-provisioner       2                   56ab6c32da135       storage-provisioner
	d3d4055f8ecd2       138784d87c9c5                                                                   11 minutes ago      Exited              coredns                   1                   ecc60cd02b8df       coredns-66bc5c9577-q6x4w
	bd4e50a8edbd5       a1894772a478e                                                                   11 minutes ago      Exited              etcd                      1                   e00e50d78d699       etcd-functional-918451
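	The container status table above can be regenerated on the node itself. A hedged sketch for a local reproduction (assuming the same profile name, and that crictl is available inside the node as it usually is with cri-dockerd):

	    # hedged sketch: inspect container state inside the minikube node
	    minikube -p functional-918451 ssh -- sudo crictl ps -a
	    minikube -p functional-918451 ssh -- "docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}"

	The second form mirrors the filter minikube itself ran earlier in this log when stopping kube-system containers.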
	
	
	==> coredns [bf9dde3577a1] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:40702 - 26535 "HINFO IN 1869121631954386126.875895371521758492. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.03198374s
	
	
	==> coredns [d3d4055f8ecd] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:57340 - 21792 "HINFO IN 1884376929635518991.5843453519963972009. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.021793349s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
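	The first coredns instance (d3d4055f8ecd) waited on the Kubernetes API and then received SIGTERM when the kube-system containers were stopped for the control-plane restart; the replacement instance (bf9dde3577a1) came up cleanly. A hedged example of pulling these logs back out of the cluster (the context name is assumed to match the minikube profile):

	    # hedged sketch: fetch current and previous coredns logs
	    kubectl --context functional-918451 -n kube-system logs -l k8s-app=kube-dns --tail=50
	    kubectl --context functional-918451 -n kube-system logs coredns-66bc5c9577-q6x4w --previous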
	
	
	==> describe nodes <==
	Name:               functional-918451
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=functional-918451
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
	                    minikube.k8s.io/name=functional-918451
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_09_17T00_33_25_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Wed, 17 Sep 2025 00:33:22 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  functional-918451
	  AcquireTime:     <unset>
	  RenewTime:       Wed, 17 Sep 2025 00:46:22 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Wed, 17 Sep 2025 00:43:08 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Wed, 17 Sep 2025 00:43:08 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Wed, 17 Sep 2025 00:43:08 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Wed, 17 Sep 2025 00:43:08 +0000   Wed, 17 Sep 2025 00:33:22 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    functional-918451
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	System Info:
	  Machine ID:                 787c0d47a41949608af3603ec5366447
	  System UUID:                f83af58d-c48d-4abe-ba83-7d4398f15ffc
	  Boot ID:                    54a40c62-e2ca-4fe1-8de3-5249514e3fbf
	  Kernel Version:             5.15.0-1084-aws
	  OS Image:                   Ubuntu 22.04.5 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://28.4.0
	  Kubelet Version:            v1.34.0
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (11 in total)
	  Namespace                   Name                                         CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                         ------------  ----------  ---------------  -------------  ---
	  default                     hello-node-75c85bcc94-nttx8                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m3s
	  default                     hello-node-connect-7d85dfc575-t4gsf          0 (0%)        0 (0%)      0 (0%)           0 (0%)         10m
	  default                     nginx-svc                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         10m
	  default                     sp-pod                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         10m
	  kube-system                 coredns-66bc5c9577-q6x4w                     100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     12m
	  kube-system                 etcd-functional-918451                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         13m
	  kube-system                 kube-apiserver-functional-918451             250m (12%)    0 (0%)      0 (0%)           0 (0%)         10m
	  kube-system                 kube-controller-manager-functional-918451    200m (10%)    0 (0%)      0 (0%)           0 (0%)         13m
	  kube-system                 kube-proxy-q4hcq                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
	  kube-system                 kube-scheduler-functional-918451             100m (5%)     0 (0%)      0 (0%)           0 (0%)         13m
	  kube-system                 storage-provisioner                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  0 (0%)
	  memory             170Mi (2%)  170Mi (2%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type     Reason                   Age                From             Message
	  ----     ------                   ----               ----             -------
	  Normal   Starting                 12m                kube-proxy       
	  Normal   Starting                 10m                kube-proxy       
	  Normal   Starting                 11m                kube-proxy       
	  Normal   Starting                 13m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 13m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  13m (x8 over 13m)  kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    13m (x8 over 13m)  kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     13m (x7 over 13m)  kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  13m                kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 13m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 13m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  13m                kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    13m                kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     13m                kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  13m                kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           13m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   NodeNotReady             11m                kubelet          Node functional-918451 status is now: NodeNotReady
	  Normal   RegisteredNode           11m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   Starting                 10m                kubelet          Starting kubelet.
	  Warning  CgroupV1                 10m                kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  10m (x8 over 10m)  kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    10m (x8 over 10m)  kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     10m (x7 over 10m)  kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  10m                kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           10m                node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	
	
	==> dmesg <==
	[Sep16 22:47] kauditd_printk_skb: 8 callbacks suppressed
	[Sep17 00:20] kauditd_printk_skb: 8 callbacks suppressed
	
	
	==> etcd [53d0462ffd2c] <==
	{"level":"warn","ts":"2025-09-17T00:35:48.148362Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41280","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.161304Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41300","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.176646Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41320","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.193870Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41336","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.212630Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41368","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.231571Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41384","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.251324Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41398","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.266264Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41428","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.283844Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41438","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.305246Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41470","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.324766Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41492","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.338961Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41522","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.366501Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41540","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.407986Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41556","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.435057Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41568","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.452382Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41588","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.477192Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41616","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.510691Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41636","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.541591Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41670","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.545272Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41650","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.604666Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41692","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:39:31.191908Z","caller":"traceutil/trace.go:172","msg":"trace[793044705] transaction","detail":"{read_only:false; response_revision:1056; number_of_response:1; }","duration":"143.194218ms","start":"2025-09-17T00:39:31.048697Z","end":"2025-09-17T00:39:31.191891Z","steps":["trace[793044705] 'process raft request'  (duration: 143.096128ms)"],"step_count":1}
	{"level":"info","ts":"2025-09-17T00:45:47.323743Z","caller":"mvcc/index.go:194","msg":"compact tree index","revision":1172}
	{"level":"info","ts":"2025-09-17T00:45:47.351263Z","caller":"mvcc/kvstore_compaction.go:70","msg":"finished scheduled compaction","compact-revision":1172,"took":"27.026718ms","hash":3159752356,"current-db-size-bytes":3325952,"current-db-size":"3.3 MB","current-db-size-in-use-bytes":1540096,"current-db-size-in-use":"1.5 MB"}
	{"level":"info","ts":"2025-09-17T00:45:47.351322Z","caller":"mvcc/hash.go:157","msg":"storing new hash","hash":3159752356,"revision":1172,"compact-revision":-1}
	
	
	==> etcd [bd4e50a8edbd] <==
	{"level":"warn","ts":"2025-09-17T00:34:46.313001Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49074","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.328657Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49100","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.352785Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49118","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.380490Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49136","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.400768Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49158","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.419290Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49186","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.492683Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49234","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:35:27.923747Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
	{"level":"info","ts":"2025-09-17T00:35:27.923816Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	{"level":"error","ts":"2025-09-17T00:35:27.923929Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926453Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926573Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.926596Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"aec36adc501070cc","current-leader-member-id":"aec36adc501070cc"}
	{"level":"info","ts":"2025-09-17T00:35:34.926693Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
	{"level":"info","ts":"2025-09-17T00:35:34.926711Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927930Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927988Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.927997Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928037Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928052Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.928059Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932196Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"error","ts":"2025-09-17T00:35:34.932268Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932295Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2025-09-17T00:35:34.932302Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	
	
	==> kernel <==
	 00:46:29 up  3:28,  0 users,  load average: 0.09, 0.42, 1.52
	Linux functional-918451 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.5 LTS"
	
	
	==> kube-apiserver [810186cb500f] <==
	I0917 00:35:51.154903       1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	I0917 00:35:52.907894       1 controller.go:667] quota admission added evaluator for: replicasets.apps
	I0917 00:35:52.957781       1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I0917 00:35:53.007854       1 controller.go:667] quota admission added evaluator for: endpoints
	I0917 00:36:04.773604       1 alloc.go:328] "allocated clusterIPs" service="default/invalid-svc" clusterIPs={"IPv4":"10.101.248.159"}
	I0917 00:36:17.589382       1 alloc.go:328] "allocated clusterIPs" service="default/nginx-svc" clusterIPs={"IPv4":"10.111.232.235"}
	I0917 00:36:27.137014       1 alloc.go:328] "allocated clusterIPs" service="default/hello-node-connect" clusterIPs={"IPv4":"10.102.46.233"}
	I0917 00:36:48.326669       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:36:51.406481       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:38:01.898138       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:38:06.934436       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:39:05.611504       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:39:11.198033       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:40:26.622737       1 alloc.go:328] "allocated clusterIPs" service="default/hello-node" clusterIPs={"IPv4":"10.98.127.179"}
	I0917 00:40:29.268023       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:40:34.849312       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:41:44.951494       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:41:46.296432       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:42:52.426599       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:43:07.339142       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:44:21.162028       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:44:26.453827       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:46.909437       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:48.754483       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:45:49.285292       1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
	
	
	==> kube-controller-manager [7cda0e892061] <==
	
	
	==> kube-controller-manager [a800de00866e] <==
	I0917 00:35:52.659077       1 shared_informer.go:356] "Caches are synced" controller="node"
	I0917 00:35:52.659241       1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
	I0917 00:35:52.659372       1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
	I0917 00:35:52.659462       1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
	I0917 00:35:52.659561       1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
	I0917 00:35:52.663056       1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
	I0917 00:35:52.664386       1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
	I0917 00:35:52.665624       1 shared_informer.go:356] "Caches are synced" controller="expand"
	I0917 00:35:52.665868       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
	I0917 00:35:52.668115       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I0917 00:35:52.668389       1 shared_informer.go:356] "Caches are synced" controller="GC"
	I0917 00:35:52.670522       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I0917 00:35:52.672883       1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
	I0917 00:35:52.691346       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I0917 00:35:52.694479       1 shared_informer.go:356] "Caches are synced" controller="endpoint"
	I0917 00:35:52.694480       1 shared_informer.go:356] "Caches are synced" controller="namespace"
	I0917 00:35:52.696661       1 shared_informer.go:356] "Caches are synced" controller="deployment"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
	I0917 00:35:52.700739       1 shared_informer.go:356] "Caches are synced" controller="disruption"
	I0917 00:35:52.701699       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
	I0917 00:35:52.701707       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrapproving"
	I0917 00:35:52.701747       1 shared_informer.go:356] "Caches are synced" controller="stateful set"
	I0917 00:35:52.702840       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
	I0917 00:35:52.735080       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	
	
	==> kube-proxy [2249e1b1919d] <==
	I0917 00:35:50.820804       1 server_linux.go:53] "Using iptables proxy"
	I0917 00:35:50.918756       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I0917 00:35:51.019889       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I0917 00:35:51.019925       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
	E0917 00:35:51.020010       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I0917 00:35:51.049955       1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I0917 00:35:51.050178       1 server_linux.go:132] "Using iptables Proxier"
	I0917 00:35:51.058525       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I0917 00:35:51.058826       1 server.go:527] "Version info" version="v1.34.0"
	I0917 00:35:51.058853       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:51.060083       1 config.go:200] "Starting service config controller"
	I0917 00:35:51.060215       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I0917 00:35:51.065750       1 config.go:106] "Starting endpoint slice config controller"
	I0917 00:35:51.065775       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I0917 00:35:51.065792       1 config.go:403] "Starting serviceCIDR config controller"
	I0917 00:35:51.065797       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I0917 00:35:51.066494       1 config.go:309] "Starting node config controller"
	I0917 00:35:51.066513       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I0917 00:35:51.066520       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I0917 00:35:51.161226       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I0917 00:35:51.166495       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I0917 00:35:51.166526       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	
	
	==> kube-proxy [bc1e5b319207] <==
	
	
	==> kube-scheduler [8f45cd385234] <==
	I0917 00:35:46.968566       1 serving.go:386] Generated self-signed cert in-memory
	W0917 00:35:49.218994       1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0917 00:35:49.219031       1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0917 00:35:49.219043       1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0917 00:35:49.219050       1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0917 00:35:49.292960       1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.0"
	I0917 00:35:49.292997       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:49.297061       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297101       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297962       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I0917 00:35:49.299271       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	I0917 00:35:49.398254       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kube-scheduler [e3a6d89ad5b6] <==
	
	
	==> kubelet <==
	Sep 17 00:44:53 functional-918451 kubelet[8557]: E0917 00:44:53.808726    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:44:57 functional-918451 kubelet[8557]: E0917 00:44:57.807841    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:45:04 functional-918451 kubelet[8557]: E0917 00:45:04.807743    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:45:08 functional-918451 kubelet[8557]: E0917 00:45:08.808148    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:45:10 functional-918451 kubelet[8557]: E0917 00:45:10.808426    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:45:18 functional-918451 kubelet[8557]: E0917 00:45:18.807859    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:45:22 functional-918451 kubelet[8557]: E0917 00:45:22.808160    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:45:23 functional-918451 kubelet[8557]: E0917 00:45:23.807839    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:45:33 functional-918451 kubelet[8557]: E0917 00:45:33.808431    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:45:35 functional-918451 kubelet[8557]: E0917 00:45:35.808634    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:45:38 functional-918451 kubelet[8557]: E0917 00:45:38.808435    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:45:46 functional-918451 kubelet[8557]: E0917 00:45:46.807641    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:45:47 functional-918451 kubelet[8557]: E0917 00:45:47.813942    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:45:53 functional-918451 kubelet[8557]: E0917 00:45:53.808391    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:46:01 functional-918451 kubelet[8557]: E0917 00:46:01.808163    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:46:01 functional-918451 kubelet[8557]: E0917 00:46:01.809388    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:46:05 functional-918451 kubelet[8557]: E0917 00:46:05.807453    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:46:14 functional-918451 kubelet[8557]: E0917 00:46:14.808282    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:46:16 functional-918451 kubelet[8557]: E0917 00:46:16.808039    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:46:20 functional-918451 kubelet[8557]: E0917 00:46:20.162302    8557 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:46:20 functional-918451 kubelet[8557]: E0917 00:46:20.162355    8557 kuberuntime_image.go:43] "Failed to pull image" err="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:46:20 functional-918451 kubelet[8557]: E0917 00:46:20.162441    8557 kuberuntime_manager.go:1449] "Unhandled Error" err="container echo-server start failed in pod hello-node-75c85bcc94-nttx8_default(085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd): ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:46:20 functional-918451 kubelet[8557]: E0917 00:46:20.162477    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ErrImagePull: \"toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-75c85bcc94-nttx8" podUID="085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd"
	Sep 17 00:46:28 functional-918451 kubelet[8557]: E0917 00:46:28.808147    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:46:28 functional-918451 kubelet[8557]: E0917 00:46:28.809580    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	
	
	==> storage-provisioner [cf11e0afbf55] <==
	W0917 00:46:04.959309       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:06.963089       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:06.967483       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:08.970288       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:08.975595       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:10.979062       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:10.983529       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:12.986898       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:12.993576       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:14.996850       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:15.001228       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:17.004233       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:17.010040       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:19.013469       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:19.020166       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:21.022909       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:21.027422       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:23.030311       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:23.034763       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:25.038179       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:25.043440       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:27.047260       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:27.054110       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:29.058412       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:46:29.063894       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	
	
	==> storage-provisioner [e707505e3be8] <==
	I0917 00:35:40.392782       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F0917 00:35:40.397573       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
	

                                                
                                                
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
helpers_test.go:269: (dbg) Run:  kubectl --context functional-918451 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod
helpers_test.go:282: ======> post-mortem[TestFunctional/parallel/ServiceCmdConnect]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context functional-918451 describe pod hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod
helpers_test.go:290: (dbg) kubectl --context functional-918451 describe pod hello-node-75c85bcc94-nttx8 hello-node-connect-7d85dfc575-t4gsf sp-pod:

                                                
                                                
-- stdout --
	Name:             hello-node-75c85bcc94-nttx8
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:40:26 +0000
	Labels:           app=hello-node
	                  pod-template-hash=75c85bcc94
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.10
	IPs:
	  IP:           10.244.0.10
	Controlled By:  ReplicaSet/hello-node-75c85bcc94
	Containers:
	  echo-server:
	    Container ID:   
	    Image:          kicbase/echo-server
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-949tz (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-949tz:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                    From               Message
	  ----     ------     ----                   ----               -------
	  Normal   Scheduled  6m4s                   default-scheduler  Successfully assigned default/hello-node-75c85bcc94-nttx8 to functional-918451
	  Warning  Failed     5m23s (x3 over 6m3s)   kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    2m58s (x5 over 6m3s)   kubelet            Pulling image "kicbase/echo-server"
	  Warning  Failed     2m57s (x5 over 6m3s)   kubelet            Error: ErrImagePull
	  Warning  Failed     2m57s (x2 over 4m31s)  kubelet            Failed to pull image "kicbase/echo-server": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     52s (x20 over 6m2s)    kubelet            Error: ImagePullBackOff
	  Normal   BackOff    37s (x21 over 6m2s)    kubelet            Back-off pulling image "kicbase/echo-server"
	
	
	Name:             hello-node-connect-7d85dfc575-t4gsf
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:27 +0000
	Labels:           app=hello-node-connect
	                  pod-template-hash=7d85dfc575
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.9
	IPs:
	  IP:           10.244.0.9
	Controlled By:  ReplicaSet/hello-node-connect-7d85dfc575
	Containers:
	  echo-server:
	    Container ID:   
	    Image:          kicbase/echo-server
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kpg27 (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-kpg27:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                 From               Message
	  ----     ------     ----                ----               -------
	  Normal   Scheduled  10m                 default-scheduler  Successfully assigned default/hello-node-connect-7d85dfc575-t4gsf to functional-918451
	  Normal   Pulling    7m6s (x5 over 10m)  kubelet            Pulling image "kicbase/echo-server"
	  Warning  Failed     7m5s (x5 over 10m)  kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     7m5s (x5 over 10m)  kubelet            Error: ErrImagePull
	  Normal   BackOff    2s (x42 over 10m)   kubelet            Back-off pulling image "kicbase/echo-server"
	  Warning  Failed     2s (x42 over 10m)   kubelet            Error: ImagePullBackOff
	
	
	Name:             sp-pod
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:23 +0000
	Labels:           test=storage-provisioner
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.8
	IPs:
	  IP:  10.244.0.8
	Containers:
	  myfrontend:
	    Container ID:   
	    Image:          docker.io/nginx
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /tmp/mount from mypd (rw)
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rrpzl (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  mypd:
	    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
	    ClaimName:  myclaim
	    ReadOnly:   false
	  kube-api-access-rrpzl:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                    From               Message
	  ----     ------     ----                   ----               -------
	  Normal   Scheduled  10m                    default-scheduler  Successfully assigned default/sp-pod to functional-918451
	  Warning  Failed     9m25s (x2 over 9m53s)  kubelet            Failed to pull image "docker.io/nginx": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    7m (x5 over 10m)       kubelet            Pulling image "docker.io/nginx"
	  Warning  Failed     6m59s (x3 over 10m)    kubelet            Failed to pull image "docker.io/nginx": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     6m59s (x5 over 10m)    kubelet            Error: ErrImagePull
	  Normal   BackOff    2s (x42 over 10m)      kubelet            Back-off pulling image "docker.io/nginx"
	  Warning  Failed     2s (x42 over 10m)      kubelet            Error: ImagePullBackOff

                                                
                                                
-- /stdout --
helpers_test.go:293: <<< TestFunctional/parallel/ServiceCmdConnect FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestFunctional/parallel/ServiceCmdConnect (603.18s)

                                                
                                    
TestFunctional/parallel/PersistentVolumeClaim (249.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:352: "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running
functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 6.003951204s
functional_test_pvc_test.go:55: (dbg) Run:  kubectl --context functional-918451 get storageclass -o=json
functional_test_pvc_test.go:75: (dbg) Run:  kubectl --context functional-918451 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:82: (dbg) Run:  kubectl --context functional-918451 get pvc myclaim -o=json
functional_test_pvc_test.go:131: (dbg) Run:  kubectl --context functional-918451 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:352: "sp-pod" [1d4fda37-50c1-41c2-8b81-a48f9ab03c3d] Pending
helpers_test.go:352: "sp-pod" [1d4fda37-50c1-41c2-8b81-a48f9ab03c3d] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
functional_test_pvc_test.go:140: ***** TestFunctional/parallel/PersistentVolumeClaim: pod "test=storage-provisioner" failed to start within 4m0s: context deadline exceeded ****
functional_test_pvc_test.go:140: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
functional_test_pvc_test.go:140: TestFunctional/parallel/PersistentVolumeClaim: showing logs for failed pods as of 2025-09-17 00:40:24.287128972 +0000 UTC m=+1158.697754995
functional_test_pvc_test.go:140: (dbg) Run:  kubectl --context functional-918451 describe po sp-pod -n default
functional_test_pvc_test.go:140: (dbg) kubectl --context functional-918451 describe po sp-pod -n default:
Name:             sp-pod
Namespace:        default
Priority:         0
Service Account:  default
Node:             functional-918451/192.168.49.2
Start Time:       Wed, 17 Sep 2025 00:36:23 +0000
Labels:           test=storage-provisioner
Annotations:      <none>
Status:           Pending
IP:               10.244.0.8
IPs:
IP:  10.244.0.8
Containers:
myfrontend:
Container ID:   
Image:          docker.io/nginx
Image ID:       
Port:           <none>
Host Port:      <none>
State:          Waiting
Reason:       ImagePullBackOff
Ready:          False
Restart Count:  0
Environment:    <none>
Mounts:
/tmp/mount from mypd (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rrpzl (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   True 
Initialized                 True 
Ready                       False 
ContainersReady             False 
PodScheduled                True 
Volumes:
mypd:
Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName:  myclaim
ReadOnly:   false
kube-api-access-rrpzl:
Type:                    Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds:  3607
ConfigMapName:           kube-root-ca.crt
Optional:                false
DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                    From               Message
----     ------     ----                   ----               -------
Normal   Scheduled  4m1s                   default-scheduler  Successfully assigned default/sp-pod to functional-918451
Warning  Failed     3m19s (x2 over 3m47s)  kubelet            Failed to pull image "docker.io/nginx": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Normal   Pulling    54s (x5 over 4m)       kubelet            Pulling image "docker.io/nginx"
Warning  Failed     53s (x3 over 4m)       kubelet            Failed to pull image "docker.io/nginx": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Warning  Failed     53s (x5 over 4m)       kubelet            Error: ErrImagePull
Normal   BackOff    2s (x15 over 4m)       kubelet            Back-off pulling image "docker.io/nginx"
Warning  Failed     2s (x15 over 4m)       kubelet            Error: ImagePullBackOff
functional_test_pvc_test.go:140: (dbg) Run:  kubectl --context functional-918451 logs sp-pod -n default
functional_test_pvc_test.go:140: (dbg) Non-zero exit: kubectl --context functional-918451 logs sp-pod -n default: exit status 1 (97.921088ms)

                                                
                                                
** stderr ** 
	Error from server (BadRequest): container "myfrontend" in pod "sp-pod" is waiting to start: trying and failing to pull image

                                                
                                                
** /stderr **
functional_test_pvc_test.go:140: kubectl --context functional-918451 logs sp-pod -n default: exit status 1
functional_test_pvc_test.go:141: failed waiting for pvctest pod : test=storage-provisioner within 4m0s: context deadline exceeded
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestFunctional/parallel/PersistentVolumeClaim]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======>  post-mortem[TestFunctional/parallel/PersistentVolumeClaim]: docker inspect <======
helpers_test.go:239: (dbg) Run:  docker inspect functional-918451
helpers_test.go:243: (dbg) docker inspect functional-918451:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	        "Created": "2025-09-17T00:32:59.129348997Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 609481,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2025-09-17T00:32:59.19036605Z",
	            "FinishedAt": "0001-01-01T00:00:00Z"
	        },
	        "Image": "sha256:3d6f74760dfc17060da5abc5d463d3d45b4ceea05955c9cc42b3ec56cb38cc48",
	        "ResolvConfPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hostname",
	        "HostsPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/hosts",
	        "LogPath": "/var/lib/docker/containers/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07/6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07-json.log",
	        "Name": "/functional-918451",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "functional-918451:/var",
	                "/lib/modules:/lib/modules:ro"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "functional-918451",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 4294967296,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 8589934592,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "ID": "6201077c0331af68c48befcadd9d1ecd3484991c08f561702e8963664e041a07",
	                "LowerDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207-init/diff:/var/lib/docker/overlay2/6bf7b6c5df3b8adf86744064027446440589049694f02d12745ec1de281bdb92/diff",
	                "MergedDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/merged",
	                "UpperDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/diff",
	                "WorkDir": "/var/lib/docker/overlay2/a08a3a54fe3da53177e6d23662d948d962c761b52517c423e2277a8f513a4207/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "volume",
	                "Name": "functional-918451",
	                "Source": "/var/lib/docker/volumes/functional-918451/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            },
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            }
	        ],
	        "Config": {
	            "Hostname": "functional-918451",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8441/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "functional-918451",
	                "name.minikube.sigs.k8s.io": "functional-918451",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "b8276d0e7a4a68853a13a364899f312a03083d4747586d37196fe37821cc60ca",
	            "SandboxKey": "/var/run/docker/netns/b8276d0e7a4a",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33515"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33516"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33519"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33517"
	                    }
	                ],
	                "8441/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "33518"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "functional-918451": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.49.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "0e:c4:e0:02:03:54",
	                    "DriverOpts": null,
	                    "GwPriority": 0,
	                    "NetworkID": "6a04d22b3edf0df0fed6fcef6fdf3ac9b7a09ca25aa9a4da277d50b627d3354f",
	                    "EndpointID": "29d031089f2a9002698ecc91a4339af43c25eef05a79345c2e7da185073d2f3e",
	                    "Gateway": "192.168.49.1",
	                    "IPAddress": "192.168.49.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DNSNames": [
	                        "functional-918451",
	                        "6201077c0331"
	                    ]
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p functional-918451 -n functional-918451
helpers_test.go:252: <<< TestFunctional/parallel/PersistentVolumeClaim FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestFunctional/parallel/PersistentVolumeClaim]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 logs -n 25: (1.044548546s)
helpers_test.go:260: TestFunctional/parallel/PersistentVolumeClaim logs: 
-- stdout --
	
	==> Audit <==
	┌────────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│  COMMAND   │                                                                            ARGS                                                                             │      PROFILE      │  USER   │ VERSION │     START TIME      │      END TIME       │
	├────────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ docker-env │ functional-918451 docker-env                                                                                                                                │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/578284.pem                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /usr/share/ca-certificates/578284.pem                                                                                        │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/51391683.0                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/5782842.pem                                                                                                   │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /usr/share/ca-certificates/5782842.pem                                                                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/ssl/certs/3ec20f2e.0                                                                                                    │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image save kicbase/echo-server:functional-918451 /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh sudo cat /etc/test/nested/copy/578284/hosts                                                                                           │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image rm kicbase/echo-server:functional-918451 --alsologtostderr                                                                          │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image ls                                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ image      │ functional-918451 image save --daemon kicbase/echo-server:functional-918451 --alsologtostderr                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ ssh        │ functional-918451 ssh echo hello                                                                                                                            │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ ssh        │ functional-918451 ssh cat /etc/hostname                                                                                                                     │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ tunnel     │ functional-918451 tunnel --alsologtostderr                                                                                                                  │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │                     │
	│ addons     │ functional-918451 addons list                                                                                                                               │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	│ addons     │ functional-918451 addons list -o json                                                                                                                       │ functional-918451 │ jenkins │ v1.37.0 │ 17 Sep 25 00:36 UTC │ 17 Sep 25 00:36 UTC │
	└────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:35:08
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:35:08.882126  616831 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:35:08.882236  616831 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:35:08.882240  616831 out.go:374] Setting ErrFile to fd 2...
	I0917 00:35:08.882244  616831 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:35:08.882557  616831 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:35:08.882972  616831 out.go:368] Setting JSON to false
	I0917 00:35:08.883969  616831 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11854,"bootTime":1758057455,"procs":186,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:35:08.884036  616831 start.go:140] virtualization:  
	I0917 00:35:08.887529  616831 out.go:179] * [functional-918451] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:35:08.890675  616831 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:35:08.890750  616831 notify.go:220] Checking for updates...
	I0917 00:35:08.894474  616831 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:35:08.897515  616831 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:35:08.900558  616831 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:35:08.903371  616831 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:35:08.906346  616831 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:35:08.909688  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:08.909776  616831 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:35:08.933038  616831 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:35:08.933180  616831 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:35:09.009791  616831 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:40 OomKillDisable:true NGoroutines:65 SystemTime:2025-09-17 00:35:08.994962348 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:35:09.009948  616831 docker.go:318] overlay module found
	I0917 00:35:09.013229  616831 out.go:179] * Using the docker driver based on existing profile
	I0917 00:35:09.016057  616831 start.go:304] selected driver: docker
	I0917 00:35:09.016067  616831 start.go:918] validating driver "docker" against &{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false
DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:09.016163  616831 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:35:09.016275  616831 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:35:09.071705  616831 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:40 OomKillDisable:true NGoroutines:65 SystemTime:2025-09-17 00:35:09.061677264 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:35:09.072135  616831 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:35:09.072151  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:09.072218  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:09.072260  616831 start.go:348] cluster config:
	{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocke
t: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false D
isableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:09.075512  616831 out.go:179] * Starting "functional-918451" primary control-plane node in "functional-918451" cluster
	I0917 00:35:09.078305  616831 cache.go:123] Beginning downloading kic base image for docker with docker
	I0917 00:35:09.081218  616831 out.go:179] * Pulling base image v0.0.48 ...
	I0917 00:35:09.083964  616831 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
	I0917 00:35:09.084125  616831 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:35:09.084153  616831 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
	I0917 00:35:09.084157  616831 cache.go:58] Caching tarball of preloaded images
	I0917 00:35:09.084220  616831 preload.go:172] Found /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0917 00:35:09.084228  616831 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
	I0917 00:35:09.084331  616831 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/config.json ...
	I0917 00:35:09.103367  616831 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
	I0917 00:35:09.103379  616831 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
	I0917 00:35:09.103399  616831 cache.go:232] Successfully downloaded all kic artifacts
	I0917 00:35:09.103423  616831 start.go:360] acquireMachinesLock for functional-918451: {Name:mkead936952bd6bbe9c88b989c6af54c1d0b5ecc Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0917 00:35:09.103485  616831 start.go:364] duration metric: took 45.997µs to acquireMachinesLock for "functional-918451"
	I0917 00:35:09.103504  616831 start.go:96] Skipping create...Using existing machine configuration
	I0917 00:35:09.103509  616831 fix.go:54] fixHost starting: 
	I0917 00:35:09.103750  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:09.119890  616831 fix.go:112] recreateIfNeeded on functional-918451: state=Running err=<nil>
	W0917 00:35:09.119909  616831 fix.go:138] unexpected machine state, will restart: <nil>
	I0917 00:35:09.123066  616831 out.go:252] * Updating the running docker "functional-918451" container ...
	I0917 00:35:09.123090  616831 machine.go:93] provisionDockerMachine start ...
	I0917 00:35:09.123171  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.141133  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.141444  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.141451  616831 main.go:141] libmachine: About to run SSH command:
	hostname
	I0917 00:35:09.279618  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-918451
	
	I0917 00:35:09.279641  616831 ubuntu.go:182] provisioning hostname "functional-918451"
	I0917 00:35:09.279702  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.297731  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.298100  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.298126  616831 main.go:141] libmachine: About to run SSH command:
	sudo hostname functional-918451 && echo "functional-918451" | sudo tee /etc/hostname
	I0917 00:35:09.452214  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-918451
	
	I0917 00:35:09.452284  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.469797  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:09.470111  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:09.470126  616831 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sfunctional-918451' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-918451/g' /etc/hosts;
				else 
					echo '127.0.1.1 functional-918451' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0917 00:35:09.608434  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0917 00:35:09.608471  616831 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-576428/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-576428/.minikube}
	I0917 00:35:09.608488  616831 ubuntu.go:190] setting up certificates
	I0917 00:35:09.608497  616831 provision.go:84] configureAuth start
	I0917 00:35:09.608556  616831 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-918451
	I0917 00:35:09.628527  616831 provision.go:143] copyHostCerts
	I0917 00:35:09.628596  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem, removing ...
	I0917 00:35:09.628610  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem
	I0917 00:35:09.628684  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem (1082 bytes)
	I0917 00:35:09.628776  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem, removing ...
	I0917 00:35:09.628780  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem
	I0917 00:35:09.628803  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem (1123 bytes)
	I0917 00:35:09.628853  616831 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem, removing ...
	I0917 00:35:09.628856  616831 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem
	I0917 00:35:09.628879  616831 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem (1675 bytes)
	I0917 00:35:09.628921  616831 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem org=jenkins.functional-918451 san=[127.0.0.1 192.168.49.2 functional-918451 localhost minikube]
	I0917 00:35:09.918611  616831 provision.go:177] copyRemoteCerts
	I0917 00:35:09.918674  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0917 00:35:09.918710  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:09.935939  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.039568  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0917 00:35:10.066377  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
	I0917 00:35:10.095272  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0917 00:35:10.122538  616831 provision.go:87] duration metric: took 514.027816ms to configureAuth
	I0917 00:35:10.122556  616831 ubuntu.go:206] setting minikube options for container-runtime
	I0917 00:35:10.122762  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:10.122824  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.141296  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.141604  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.141611  616831 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0917 00:35:10.281086  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0917 00:35:10.281097  616831 ubuntu.go:71] root file system type: overlay
	I0917 00:35:10.281198  616831 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0917 00:35:10.281257  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.298083  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.298376  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.298471  616831 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0917 00:35:10.456882  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=always
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I0917 00:35:10.456955  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.475396  616831 main.go:141] libmachine: Using SSH client type: native
	I0917 00:35:10.475699  616831 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil>  [] 0s} 127.0.0.1 33515 <nil> <nil>}
	I0917 00:35:10.475724  616831 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0917 00:35:10.625699  616831 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0917 00:35:10.625713  616831 machine.go:96] duration metric: took 1.50261688s to provisionDockerMachine
	I0917 00:35:10.625722  616831 start.go:293] postStartSetup for "functional-918451" (driver="docker")
	I0917 00:35:10.625731  616831 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0917 00:35:10.625799  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0917 00:35:10.625835  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.643208  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.741407  616831 ssh_runner.go:195] Run: cat /etc/os-release
	I0917 00:35:10.744475  616831 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0917 00:35:10.744498  616831 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0917 00:35:10.744506  616831 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0917 00:35:10.744512  616831 info.go:137] Remote host: Ubuntu 22.04.5 LTS
	I0917 00:35:10.744522  616831 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/addons for local assets ...
	I0917 00:35:10.744575  616831 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/files for local assets ...
	I0917 00:35:10.744660  616831 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem -> 5782842.pem in /etc/ssl/certs
	I0917 00:35:10.744735  616831 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/test/nested/copy/578284/hosts -> hosts in /etc/test/nested/copy/578284
	I0917 00:35:10.744781  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/578284
	I0917 00:35:10.753363  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem --> /etc/ssl/certs/5782842.pem (1708 bytes)
	I0917 00:35:10.777494  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/test/nested/copy/578284/hosts --> /etc/test/nested/copy/578284/hosts (40 bytes)
	I0917 00:35:10.808587  616831 start.go:296] duration metric: took 182.835375ms for postStartSetup
	I0917 00:35:10.808661  616831 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:35:10.808710  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.825228  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.921615  616831 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0917 00:35:10.926439  616831 fix.go:56] duration metric: took 1.822916967s for fixHost
	I0917 00:35:10.926455  616831 start.go:83] releasing machines lock for "functional-918451", held for 1.822962069s
	I0917 00:35:10.926523  616831 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" functional-918451
	I0917 00:35:10.944280  616831 ssh_runner.go:195] Run: cat /version.json
	I0917 00:35:10.944302  616831 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0917 00:35:10.944320  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.944363  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:10.966713  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:10.978091  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:11.188990  616831 ssh_runner.go:195] Run: systemctl --version
	I0917 00:35:11.193766  616831 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0917 00:35:11.198183  616831 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0917 00:35:11.219806  616831 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0917 00:35:11.219874  616831 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0917 00:35:11.228951  616831 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
	I0917 00:35:11.228967  616831 start.go:495] detecting cgroup driver to use...
	I0917 00:35:11.228997  616831 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:35:11.229092  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:35:11.247155  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I0917 00:35:11.258611  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0917 00:35:11.270113  616831 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0917 00:35:11.270169  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0917 00:35:11.280500  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:35:11.291337  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0917 00:35:11.305281  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0917 00:35:11.316659  616831 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0917 00:35:11.326609  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0917 00:35:11.339061  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0917 00:35:11.350191  616831 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0917 00:35:11.361784  616831 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0917 00:35:11.371029  616831 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0917 00:35:11.380076  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:11.491379  616831 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I0917 00:35:11.707140  616831 start.go:495] detecting cgroup driver to use...
	I0917 00:35:11.707176  616831 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0917 00:35:11.707228  616831 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0917 00:35:11.722959  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:35:11.735963  616831 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I0917 00:35:11.777124  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I0917 00:35:11.790691  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0917 00:35:11.809931  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0917 00:35:11.827868  616831 ssh_runner.go:195] Run: which cri-dockerd
	I0917 00:35:11.832090  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0917 00:35:11.842322  616831 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I0917 00:35:11.861532  616831 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0917 00:35:11.980450  616831 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0917 00:35:12.101702  616831 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I0917 00:35:12.101793  616831 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
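
Editor's note: only the size of the generated /etc/docker/daemon.json is logged (130 bytes). Since the surrounding lines say Docker is being configured for the cgroupfs driver, a plausible guess is Docker's documented "exec-opts" key; the sketch below prints such a file and is an assumption about the payload, not its actual contents:

    // Prints a daemon.json of the kind this step likely writes; the exact
    // 130-byte payload is not in the log, so this is an educated guess.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        cfg := map[string]any{
            "exec-opts": []string{"native.cgroupdriver=cgroupfs"},
        }
        b, _ := json.MarshalIndent(cfg, "", "  ")
        fmt.Println(string(b))
    }
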
	I0917 00:35:12.126257  616831 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I0917 00:35:12.138069  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:12.258999  616831 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0917 00:35:38.407410  616831 ssh_runner.go:235] Completed: sudo systemctl restart docker: (26.148387762s)
	I0917 00:35:38.407470  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I0917 00:35:38.434609  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0917 00:35:38.463774  616831 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
	I0917 00:35:38.484708  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:35:38.496143  616831 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0917 00:35:38.594906  616831 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0917 00:35:38.689669  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:38.785711  616831 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0917 00:35:38.799399  616831 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I0917 00:35:38.811558  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:38.907605  616831 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I0917 00:35:38.991545  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0917 00:35:39.006514  616831 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0917 00:35:39.006588  616831 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0917 00:35:39.012465  616831 start.go:563] Will wait 60s for crictl version
	I0917 00:35:39.012526  616831 ssh_runner.go:195] Run: which crictl
	I0917 00:35:39.016162  616831 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0917 00:35:39.058036  616831 start.go:579] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.4.0
	RuntimeApiVersion:  v1
	I0917 00:35:39.058095  616831 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:35:39.079756  616831 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0917 00:35:39.106648  616831 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
	I0917 00:35:39.106725  616831 cli_runner.go:164] Run: docker network inspect functional-918451 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0917 00:35:39.122340  616831 ssh_runner.go:195] Run: grep 192.168.49.1	host.minikube.internal$ /etc/hosts
	I0917 00:35:39.130638  616831 out.go:179]   - apiserver.enable-admission-plugins=NamespaceAutoProvision
	I0917 00:35:39.133387  616831 kubeadm.go:875] updating cluster {Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServer
IPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker
BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0917 00:35:39.133516  616831 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:35:39.133606  616831 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:35:39.152835  616831 docker.go:691] Got preloaded images: -- stdout --
	minikube-local-cache-test:functional-918451
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	registry.k8s.io/pause:3.3
	registry.k8s.io/pause:3.1
	registry.k8s.io/pause:latest
	
	-- /stdout --
	I0917 00:35:39.152848  616831 docker.go:621] Images already preloaded, skipping extraction
	I0917 00:35:39.152916  616831 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0917 00:35:39.172196  616831 docker.go:691] Got preloaded images: -- stdout --
	minikube-local-cache-test:functional-918451
	registry.k8s.io/kube-apiserver:v1.34.0
	registry.k8s.io/kube-scheduler:v1.34.0
	registry.k8s.io/kube-controller-manager:v1.34.0
	registry.k8s.io/kube-proxy:v1.34.0
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	registry.k8s.io/pause:3.3
	registry.k8s.io/pause:3.1
	registry.k8s.io/pause:latest
	
	-- /stdout --
	I0917 00:35:39.172209  616831 cache_images.go:85] Images are preloaded, skipping loading
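
Editor's note: both image listings above confirm the preload is already in place, so extraction and loading are skipped. A small Go sketch of that check, comparing "docker images" output in Repository:Tag form against the image set shown in the stdout block above (the required list is copied from the log and is illustrative):

    // Check whether the preloaded images are already present, mirroring the
    // "docker images --format {{.Repository}}:{{.Tag}}" call above.
    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        required := []string{
            "registry.k8s.io/kube-apiserver:v1.34.0",
            "registry.k8s.io/kube-controller-manager:v1.34.0",
            "registry.k8s.io/kube-scheduler:v1.34.0",
            "registry.k8s.io/kube-proxy:v1.34.0",
            "registry.k8s.io/etcd:3.6.4-0",
            "registry.k8s.io/coredns/coredns:v1.12.1",
            "registry.k8s.io/pause:3.10.1",
        }
        out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
        if err != nil {
            fmt.Println("docker images failed:", err)
            return
        }
        have := map[string]bool{}
        for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
            have[line] = true
        }
        for _, img := range required {
            if !have[img] {
                fmt.Println("missing image, preload would be extracted:", img)
                return
            }
        }
        fmt.Println("images already preloaded, skipping extraction")
    }
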
	I0917 00:35:39.172217  616831 kubeadm.go:926] updating node { 192.168.49.2 8441 v1.34.0 docker true true} ...
	I0917 00:35:39.172329  616831 kubeadm.go:938] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-918451 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0917 00:35:39.172397  616831 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0917 00:35:39.219006  616831 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
	I0917 00:35:39.219077  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:39.219095  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:39.219102  616831 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0917 00:35:39.219123  616831 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8441 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-918451 NodeName:functional-918451 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:
map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0917 00:35:39.219247  616831 kubeadm.go:195] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.49.2
	  bindPort: 8441
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "functional-918451"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.49.2"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceAutoProvision"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8441
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0917 00:35:39.219315  616831 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
	I0917 00:35:39.228223  616831 binaries.go:44] Found k8s binaries, skipping transfer
	I0917 00:35:39.228283  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0917 00:35:39.237303  616831 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (316 bytes)
	I0917 00:35:39.256906  616831 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0917 00:35:39.274945  616831 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2068 bytes)
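
Editor's note: the rendered kubeadm config is staged as /var/tmp/minikube/kubeadm.yaml.new and, further down in this log, applied phase by phase with "kubeadm init phase ... --config". A minimal Go sketch of that sequence, using the phase names and config path that appear later in the log; everything else is illustrative:

    // Apply the staged config with the same "kubeadm init phase" sequence
    // this log runs later; the phase names and config path come from the log.
    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        cfg := "/var/tmp/minikube/kubeadm.yaml"
        phases := [][]string{
            {"certs", "all"},
            {"kubeconfig", "all"},
            {"kubelet-start"},
            {"control-plane", "all"},
            {"etcd", "local"},
        }
        for _, p := range phases {
            args := append(append([]string{"init", "phase"}, p...), "--config", cfg)
            cmd := exec.Command("kubeadm", args...)
            cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
            if err := cmd.Run(); err != nil {
                fmt.Fprintf(os.Stderr, "kubeadm %v failed: %v\n", args, err)
                return
            }
        }
    }
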
	I0917 00:35:39.293022  616831 ssh_runner.go:195] Run: grep 192.168.49.2	control-plane.minikube.internal$ /etc/hosts
	I0917 00:35:39.296647  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:39.393067  616831 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:35:39.405322  616831 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451 for IP: 192.168.49.2
	I0917 00:35:39.405335  616831 certs.go:194] generating shared ca certs ...
	I0917 00:35:39.405349  616831 certs.go:226] acquiring lock for ca certs: {Name:mk04b183dabeee5957951eb115c646a018da171d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:39.405482  616831 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key
	I0917 00:35:39.405519  616831 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key
	I0917 00:35:39.405525  616831 certs.go:256] generating profile certs ...
	I0917 00:35:39.405604  616831 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.key
	I0917 00:35:39.405647  616831 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.key.052cde45
	I0917 00:35:39.405679  616831 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.key
	I0917 00:35:39.405780  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284.pem (1338 bytes)
	W0917 00:35:39.405806  616831 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284_empty.pem, impossibly tiny 0 bytes
	I0917 00:35:39.405813  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem (1671 bytes)
	I0917 00:35:39.405835  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem (1082 bytes)
	I0917 00:35:39.405875  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem (1123 bytes)
	I0917 00:35:39.405896  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem (1675 bytes)
	I0917 00:35:39.405934  616831 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem (1708 bytes)
	I0917 00:35:39.406508  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0917 00:35:39.430565  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I0917 00:35:39.455020  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0917 00:35:39.478799  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I0917 00:35:39.506545  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
	I0917 00:35:39.542615  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I0917 00:35:39.575447  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0917 00:35:39.627854  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0917 00:35:39.680326  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0917 00:35:39.731613  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/578284.pem --> /usr/share/ca-certificates/578284.pem (1338 bytes)
	I0917 00:35:39.800308  616831 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/ssl/certs/5782842.pem --> /usr/share/ca-certificates/5782842.pem (1708 bytes)
	I0917 00:35:39.863245  616831 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0917 00:35:39.886427  616831 ssh_runner.go:195] Run: openssl version
	I0917 00:35:39.892218  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/5782842.pem && ln -fs /usr/share/ca-certificates/5782842.pem /etc/ssl/certs/5782842.pem"
	I0917 00:35:39.909376  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.920250  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 17 00:32 /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.920315  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/5782842.pem
	I0917 00:35:39.933447  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/5782842.pem /etc/ssl/certs/3ec20f2e.0"
	I0917 00:35:39.951211  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0917 00:35:39.975815  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.988600  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 17 00:21 /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.988656  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0917 00:35:39.999233  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0917 00:35:40.010900  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/578284.pem && ln -fs /usr/share/ca-certificates/578284.pem /etc/ssl/certs/578284.pem"
	I0917 00:35:40.027029  616831 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.032099  616831 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 17 00:32 /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.032177  616831 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/578284.pem
	I0917 00:35:40.048270  616831 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/578284.pem /etc/ssl/certs/51391683.0"
	I0917 00:35:40.071955  616831 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0917 00:35:40.079346  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I0917 00:35:40.094668  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I0917 00:35:40.106687  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I0917 00:35:40.116505  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I0917 00:35:40.124315  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I0917 00:35:40.131800  616831 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
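
Editor's note: each "openssl x509 -checkend 86400" call above asks whether the certificate will still be valid 24 hours from now; a non-zero exit would trigger regeneration. The same check in Go with crypto/x509, shown as a sketch; the certificate path is one of the files checked above:

    // The Go equivalent of "openssl x509 -checkend 86400": report if the
    // certificate expires within the next 24 hours.
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        data, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Fprintln(os.Stderr, "no PEM block found")
            return
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        if time.Until(cert.NotAfter) < 24*time.Hour {
            fmt.Println("certificate expires within 24h, would be regenerated")
        } else {
            fmt.Println("certificate valid for more than 24h")
        }
    }
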
	I0917 00:35:40.139052  616831 kubeadm.go:392] StartCluster: {Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs
:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker Bin
aryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:35:40.139197  616831 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0917 00:35:40.197057  616831 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0917 00:35:40.224036  616831 kubeadm.go:408] found existing configuration files, will attempt cluster restart
	I0917 00:35:40.224045  616831 kubeadm.go:589] restartPrimaryControlPlane start ...
	I0917 00:35:40.224108  616831 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I0917 00:35:40.234943  616831 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.235576  616831 kubeconfig.go:125] found "functional-918451" server: "https://192.168.49.2:8441"
	I0917 00:35:40.237322  616831 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I0917 00:35:40.255950  616831 kubeadm.go:636] detected kubeadm config drift (will reconfigure cluster from new /var/tmp/minikube/kubeadm.yaml):
	-- stdout --
	--- /var/tmp/minikube/kubeadm.yaml	2025-09-17 00:33:07.200319649 +0000
	+++ /var/tmp/minikube/kubeadm.yaml.new	2025-09-17 00:35:39.284623484 +0000
	@@ -24,7 +24,7 @@
	   certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
	   extraArgs:
	     - name: "enable-admission-plugins"
	-      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	+      value: "NamespaceAutoProvision"
	 controllerManager:
	   extraArgs:
	     - name: "allocate-node-cidrs"
	
	-- /stdout --
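
Editor's note: drift detection is a plain "diff -u" of the existing kubeadm.yaml against the newly rendered one; diff exits 0 when the files match and 1 when they differ, and a difference sends the flow into the reconfigure path shown above. A short Go sketch of that check (paths from the log):

    // Detect config drift the same way as above: "diff -u" exits 0 when the
    // files match and 1 when they differ.
    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("diff", "-u",
            "/var/tmp/minikube/kubeadm.yaml", "/var/tmp/minikube/kubeadm.yaml.new")
        out, err := cmd.CombinedOutput()
        if err == nil {
            fmt.Println("no drift detected")
            return
        }
        if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
            fmt.Printf("config drift detected, reconfiguring:\n%s", out)
            return
        }
        fmt.Println("diff failed:", err)
    }
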
	I0917 00:35:40.255960  616831 kubeadm.go:1152] stopping kube-system containers ...
	I0917 00:35:40.256030  616831 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0917 00:35:40.285051  616831 docker.go:484] Stopping containers: [e3a6d89ad5b6 e707505e3be8 3c0a229533b1 7a2c5910368f 56ab6c32da13 ae4004a0e0f9 3c026c8adcb2 ca82151f5d51 d3d4055f8ecd c07995bc710c bd4e50a8edbd 0dfa5a327ca4 4654ac6f884c 30225879638d 17077358dad7 ecc60cd02b8d e00e50d78d69 aeaa29ffb123 3b3c0e6a4d32 6d1c97d32588 8b0ba516c25d 0700097d1bcb 61f808cbe0f6 120f09d2a6bd 010fc9b02644 d163b7d82097 139117743d86 27be5cee6745 647b3f98f673 ac639e371291]
	I0917 00:35:40.285138  616831 ssh_runner.go:195] Run: docker stop e3a6d89ad5b6 e707505e3be8 3c0a229533b1 7a2c5910368f 56ab6c32da13 ae4004a0e0f9 3c026c8adcb2 ca82151f5d51 d3d4055f8ecd c07995bc710c bd4e50a8edbd 0dfa5a327ca4 4654ac6f884c 30225879638d 17077358dad7 ecc60cd02b8d e00e50d78d69 aeaa29ffb123 3b3c0e6a4d32 6d1c97d32588 8b0ba516c25d 0700097d1bcb 61f808cbe0f6 120f09d2a6bd 010fc9b02644 d163b7d82097 139117743d86 27be5cee6745 647b3f98f673 ac639e371291
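
Editor's note: before reconfiguring, every kubelet-managed container is stopped; the name filter k8s_.*_(kube-system)_ matches the kubelet's k8s_<container>_<pod>_<namespace>_ naming scheme. A Go sketch of the same list-then-stop step (the filter string is taken from the log):

    // Stop every kubelet-managed kube-system container: list IDs matching the
    // name filter from the log, then stop them in a single "docker stop".
    package main

    import (
        "fmt"
        "os"
        "os/exec"
        "strings"
    )

    func main() {
        out, err := exec.Command("docker", "ps", "-a",
            "--filter=name=k8s_.*_(kube-system)_", "--format={{.ID}}").Output()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        ids := strings.Fields(string(out))
        if len(ids) == 0 {
            return
        }
        stop := exec.Command("docker", append([]string{"stop"}, ids...)...)
        stop.Stdout, stop.Stderr = os.Stdout, os.Stderr
        if err := stop.Run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }
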
	I0917 00:35:40.643651  616831 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I0917 00:35:40.780219  616831 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0917 00:35:40.789089  616831 kubeadm.go:157] found existing configuration files:
	-rw------- 1 root root 5631 Sep 17 00:33 /etc/kubernetes/admin.conf
	-rw------- 1 root root 5640 Sep 17 00:33 /etc/kubernetes/controller-manager.conf
	-rw------- 1 root root 1972 Sep 17 00:33 /etc/kubernetes/kubelet.conf
	-rw------- 1 root root 5588 Sep 17 00:33 /etc/kubernetes/scheduler.conf
	
	I0917 00:35:40.789169  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
	I0917 00:35:40.797724  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
	I0917 00:35:40.806187  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.806255  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0917 00:35:40.814650  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
	I0917 00:35:40.822929  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.822984  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0917 00:35:40.831609  616831 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
	I0917 00:35:40.839866  616831 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
	stdout:
	
	stderr:
	I0917 00:35:40.839924  616831 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0917 00:35:40.848095  616831 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0917 00:35:40.856862  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:40.903113  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.491648  616831 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (2.588511393s)
	I0917 00:35:43.491665  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.659254  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.739072  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:43.829895  616831 api_server.go:52] waiting for apiserver process to appear ...
	I0917 00:35:43.829967  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:44.330684  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:44.830547  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:45.330880  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:45.373119  616831 api_server.go:72] duration metric: took 1.543238975s to wait for apiserver process to appear ...
	I0917 00:35:45.373135  616831 api_server.go:88] waiting for apiserver healthz status ...
	I0917 00:35:45.373162  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.208165  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W0917 00:35:49.208181  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
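
Editor's note: immediately after the restart the unauthenticated /healthz probe is rejected with 403 (the anonymous user has no RBAC binding for that path yet), then returns 500 while the post-start hooks finish, and finally 200. The wait loop only cares about reaching 200. A hedged Go sketch of such a poll against the endpoint from the log, with certificate verification skipped because the control plane uses minikube's self-signed CA:

    // Poll /healthz until the apiserver answers 200; 403 and 500 responses
    // like the ones logged here are treated as "not ready yet".
    package main

    import (
        "crypto/tls"
        "fmt"
        "io"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{
            Timeout:   5 * time.Second,
            Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
        }
        url := "https://192.168.49.2:8441/healthz" // endpoint from the log
        for i := 0; i < 60; i++ {
            resp, err := client.Get(url)
            if err == nil {
                body, _ := io.ReadAll(resp.Body)
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    fmt.Println("apiserver healthy:", string(body))
                    return
                }
                fmt.Printf("not ready yet (HTTP %d), retrying\n", resp.StatusCode)
            }
            time.Sleep(500 * time.Millisecond)
        }
        fmt.Println("gave up waiting for /healthz")
    }
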
	I0917 00:35:49.208194  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.270648  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W0917 00:35:49.270665  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I0917 00:35:49.373926  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.382729  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:49.382744  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:49.873319  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:49.884187  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:49.884206  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:50.373776  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:50.384331  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W0917 00:35:50.384350  616831 api_server.go:103] status: https://192.168.49.2:8441/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I0917 00:35:50.873627  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:50.882213  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I0917 00:35:50.895687  616831 api_server.go:141] control plane version: v1.34.0
	I0917 00:35:50.895703  616831 api_server.go:131] duration metric: took 5.522561671s to wait for apiserver health ...
	I0917 00:35:50.895711  616831 cni.go:84] Creating CNI manager for ""
	I0917 00:35:50.895720  616831 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:35:50.899145  616831 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I0917 00:35:50.902037  616831 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0917 00:35:50.911370  616831 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I0917 00:35:50.933173  616831 system_pods.go:43] waiting for kube-system pods to appear ...
	I0917 00:35:50.936624  616831 system_pods.go:59] 7 kube-system pods found
	I0917 00:35:50.936650  616831 system_pods.go:61] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:50.936657  616831 system_pods.go:61] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:50.936665  616831 system_pods.go:61] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:50.936671  616831 system_pods.go:61] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:50.936676  616831 system_pods.go:61] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:50.936682  616831 system_pods.go:61] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:50.936687  616831 system_pods.go:61] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:50.936691  616831 system_pods.go:74] duration metric: took 3.508165ms to wait for pod list to return data ...
	I0917 00:35:50.936698  616831 node_conditions.go:102] verifying NodePressure condition ...
	I0917 00:35:50.939626  616831 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0917 00:35:50.939645  616831 node_conditions.go:123] node cpu capacity is 2
	I0917 00:35:50.939655  616831 node_conditions.go:105] duration metric: took 2.952864ms to run NodePressure ...
	I0917 00:35:50.939671  616831 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I0917 00:35:51.197134  616831 kubeadm.go:720] waiting for restarted kubelet to initialise ...
	I0917 00:35:51.200827  616831 kubeadm.go:735] kubelet initialised
	I0917 00:35:51.200839  616831 kubeadm.go:736] duration metric: took 3.691915ms waiting for restarted kubelet to initialise ...
	I0917 00:35:51.200853  616831 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0917 00:35:51.209223  616831 ops.go:34] apiserver oom_adj: -16
	I0917 00:35:51.209235  616831 kubeadm.go:593] duration metric: took 10.985184815s to restartPrimaryControlPlane
	I0917 00:35:51.209243  616831 kubeadm.go:394] duration metric: took 11.070201087s to StartCluster
	I0917 00:35:51.209257  616831 settings.go:142] acquiring lock: {Name:mkeeff7458e530a541c151580b54d47f2e77f0de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:51.209311  616831 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:35:51.209950  616831 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/kubeconfig: {Name:mk3b9e4b05730cfa71613487e1675bc90b668ce8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0917 00:35:51.210162  616831 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0917 00:35:51.210539  616831 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:35:51.210523  616831 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I0917 00:35:51.210595  616831 addons.go:69] Setting default-storageclass=true in profile "functional-918451"
	I0917 00:35:51.210594  616831 addons.go:69] Setting storage-provisioner=true in profile "functional-918451"
	I0917 00:35:51.210606  616831 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-918451"
	I0917 00:35:51.210607  616831 addons.go:238] Setting addon storage-provisioner=true in "functional-918451"
	W0917 00:35:51.210612  616831 addons.go:247] addon storage-provisioner should already be in state true
	I0917 00:35:51.210636  616831 host.go:66] Checking if "functional-918451" exists ...
	I0917 00:35:51.210902  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.211036  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.213522  616831 out.go:179] * Verifying Kubernetes components...
	I0917 00:35:51.216759  616831 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0917 00:35:51.243427  616831 addons.go:238] Setting addon default-storageclass=true in "functional-918451"
	W0917 00:35:51.243437  616831 addons.go:247] addon default-storageclass should already be in state true
	I0917 00:35:51.243461  616831 host.go:66] Checking if "functional-918451" exists ...
	I0917 00:35:51.243855  616831 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
	I0917 00:35:51.254231  616831 out.go:179]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0917 00:35:51.257133  616831 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:35:51.257149  616831 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0917 00:35:51.257213  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:51.279854  616831 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
	I0917 00:35:51.279867  616831 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0917 00:35:51.279941  616831 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
	I0917 00:35:51.293176  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:51.310104  616831 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
	I0917 00:35:51.443085  616831 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0917 00:35:51.483844  616831 node_ready.go:35] waiting up to 6m0s for node "functional-918451" to be "Ready" ...
	I0917 00:35:51.490640  616831 node_ready.go:49] node "functional-918451" is "Ready"
	I0917 00:35:51.490655  616831 node_ready.go:38] duration metric: took 6.792097ms for node "functional-918451" to be "Ready" ...
	I0917 00:35:51.490669  616831 api_server.go:52] waiting for apiserver process to appear ...
	I0917 00:35:51.490723  616831 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:35:51.499047  616831 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0917 00:35:51.527212  616831 api_server.go:72] duration metric: took 317.021453ms to wait for apiserver process to appear ...
	I0917 00:35:51.527239  616831 api_server.go:88] waiting for apiserver healthz status ...
	I0917 00:35:51.527257  616831 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8441/healthz ...
	I0917 00:35:51.537261  616831 api_server.go:279] https://192.168.49.2:8441/healthz returned 200:
	ok
	I0917 00:35:51.540349  616831 api_server.go:141] control plane version: v1.34.0
	I0917 00:35:51.540365  616831 api_server.go:131] duration metric: took 13.120715ms to wait for apiserver health ...
	I0917 00:35:51.540373  616831 system_pods.go:43] waiting for kube-system pods to appear ...
	I0917 00:35:51.546242  616831 system_pods.go:59] 7 kube-system pods found
	I0917 00:35:51.546261  616831 system_pods.go:61] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.546268  616831 system_pods.go:61] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.546276  616831 system_pods.go:61] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.546281  616831 system_pods.go:61] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.546303  616831 system_pods.go:61] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:51.546309  616831 system_pods.go:61] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.546315  616831 system_pods.go:61] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:51.546320  616831 system_pods.go:74] duration metric: took 5.942723ms to wait for pod list to return data ...
	I0917 00:35:51.546328  616831 default_sa.go:34] waiting for default service account to be created ...
	I0917 00:35:51.548754  616831 default_sa.go:45] found service account: "default"
	I0917 00:35:51.548767  616831 default_sa.go:55] duration metric: took 2.434427ms for default service account to be created ...
	I0917 00:35:51.548774  616831 system_pods.go:116] waiting for k8s-apps to be running ...
	I0917 00:35:51.556653  616831 system_pods.go:86] 7 kube-system pods found
	I0917 00:35:51.556671  616831 system_pods.go:89] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.556689  616831 system_pods.go:89] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.556696  616831 system_pods.go:89] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.556701  616831 system_pods.go:89] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.556707  616831 system_pods.go:89] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
	I0917 00:35:51.556712  616831 system_pods.go:89] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.556717  616831 system_pods.go:89] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
	I0917 00:35:51.556741  616831 retry.go:31] will retry after 289.97205ms: missing components: kube-proxy
	I0917 00:35:51.587796  616831 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0917 00:35:51.851992  616831 system_pods.go:86] 7 kube-system pods found
	I0917 00:35:51.852011  616831 system_pods.go:89] "coredns-66bc5c9577-q6x4w" [26bb6d39-5353-4c95-97a6-b92f16438243] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I0917 00:35:51.852018  616831 system_pods.go:89] "etcd-functional-918451" [9e135141-0acb-4f48-b555-59f828914b0f] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I0917 00:35:51.852032  616831 system_pods.go:89] "kube-apiserver-functional-918451" [a4257554-5980-4e52-a3d2-71b0e3865d7a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I0917 00:35:51.852038  616831 system_pods.go:89] "kube-controller-manager-functional-918451" [c0879b40-7d6c-4cc9-a983-ef9e20f78509] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I0917 00:35:51.852041  616831 system_pods.go:89] "kube-proxy-q4hcq" [ea7eb633-0e1c-4fee-8469-b9203fba2e85] Running
	I0917 00:35:51.852046  616831 system_pods.go:89] "kube-scheduler-functional-918451" [231d8a51-2a2f-4657-9efb-36d7413d7aac] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I0917 00:35:51.852049  616831 system_pods.go:89] "storage-provisioner" [52933a3d-318b-43c1-bb02-5115e2dcea88] Running
	I0917 00:35:51.852056  616831 system_pods.go:126] duration metric: took 303.277418ms to wait for k8s-apps to be running ...
	I0917 00:35:51.852071  616831 system_svc.go:44] waiting for kubelet service to be running ....
	I0917 00:35:51.852127  616831 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:35:52.405444  616831 system_svc.go:56] duration metric: took 553.36513ms WaitForService to wait for kubelet
	I0917 00:35:52.405458  616831 kubeadm.go:578] duration metric: took 1.195275324s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0917 00:35:52.405474  616831 node_conditions.go:102] verifying NodePressure condition ...
	I0917 00:35:52.407851  616831 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0917 00:35:52.407865  616831 node_conditions.go:123] node cpu capacity is 2
	I0917 00:35:52.407875  616831 node_conditions.go:105] duration metric: took 2.396299ms to run NodePressure ...
	I0917 00:35:52.407885  616831 start.go:241] waiting for startup goroutines ...
	I0917 00:35:52.408987  616831 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
	I0917 00:35:52.411837  616831 addons.go:514] duration metric: took 1.201321093s for enable addons: enabled=[default-storageclass storage-provisioner]
	I0917 00:35:52.411870  616831 start.go:246] waiting for cluster config update ...
	I0917 00:35:52.411882  616831 start.go:255] writing updated cluster config ...
	I0917 00:35:52.412173  616831 ssh_runner.go:195] Run: rm -f paused
	I0917 00:35:52.415576  616831 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:35:52.418997  616831 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-q6x4w" in "kube-system" namespace to be "Ready" or be gone ...
	W0917 00:35:54.424244  616831 pod_ready.go:104] pod "coredns-66bc5c9577-q6x4w" is not "Ready", error: <nil>
	I0917 00:35:56.942816  616831 pod_ready.go:94] pod "coredns-66bc5c9577-q6x4w" is "Ready"
	I0917 00:35:56.942832  616831 pod_ready.go:86] duration metric: took 4.52382257s for pod "coredns-66bc5c9577-q6x4w" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:35:56.950902  616831 pod_ready.go:83] waiting for pod "etcd-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	W0917 00:35:58.956638  616831 pod_ready.go:104] pod "etcd-functional-918451" is not "Ready", error: <nil>
	I0917 00:36:00.957005  616831 pod_ready.go:94] pod "etcd-functional-918451" is "Ready"
	I0917 00:36:00.957018  616831 pod_ready.go:86] duration metric: took 4.006102595s for pod "etcd-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.959116  616831 pod_ready.go:83] waiting for pod "kube-apiserver-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.962869  616831 pod_ready.go:94] pod "kube-apiserver-functional-918451" is "Ready"
	I0917 00:36:00.962882  616831 pod_ready.go:86] duration metric: took 3.751293ms for pod "kube-apiserver-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.964932  616831 pod_ready.go:83] waiting for pod "kube-controller-manager-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.968976  616831 pod_ready.go:94] pod "kube-controller-manager-functional-918451" is "Ready"
	I0917 00:36:00.968989  616831 pod_ready.go:86] duration metric: took 4.046539ms for pod "kube-controller-manager-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:00.971150  616831 pod_ready.go:83] waiting for pod "kube-proxy-q4hcq" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.155524  616831 pod_ready.go:94] pod "kube-proxy-q4hcq" is "Ready"
	I0917 00:36:01.155540  616831 pod_ready.go:86] duration metric: took 184.378745ms for pod "kube-proxy-q4hcq" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.355133  616831 pod_ready.go:83] waiting for pod "kube-scheduler-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.754304  616831 pod_ready.go:94] pod "kube-scheduler-functional-918451" is "Ready"
	I0917 00:36:01.754318  616831 pod_ready.go:86] duration metric: took 399.17189ms for pod "kube-scheduler-functional-918451" in "kube-system" namespace to be "Ready" or be gone ...
	I0917 00:36:01.754329  616831 pod_ready.go:40] duration metric: took 9.3387321s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I0917 00:36:01.810606  616831 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
	I0917 00:36:01.813975  616831 out.go:179] * Done! kubectl is now configured to use "functional-918451" cluster and "default" namespace by default
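	The start log above ends after minikube's extra readiness wait, which at 00:35:52 begins polling every kube-system pod carrying one of the listed labels until each reports Ready. A roughly equivalent manual check is sketched below; it assumes the "functional-918451" kubectl context that the log reports configuring and is not a command the test itself ran:

	    kubectl --context functional-918451 -n kube-system wait pod \
	      -l k8s-app=kube-dns --for=condition=Ready --timeout=4m
	    # repeat with -l component=etcd, component=kube-apiserver,
	    # component=kube-controller-manager, k8s-app=kube-proxy, component=kube-scheduler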
	
	
	==> Docker <==
	Sep 17 00:35:50 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:35:50Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9bd802eae08c92740aa0fc9ca4bd14c1e4c6019f8c83adfa9b39762b626ad50c/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
	Sep 17 00:35:50 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:35:50Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c70143b4207a5a2888b2d4924d69bb1271abc510a9bd681f2bec5ced9e097937/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
	Sep 17 00:36:05 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fc20c680b0c9a109d2a796a54605e3e59ed6b90dde8d4f74f695afd26aa55199/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:36:05 functional-918451 dockerd[6835]: time="2025-09-17T00:36:05.462212727Z" level=error msg="Not continuing with pull after error" error="errors:\ndenied: requested access to the resource is denied\nunauthorized: authentication required\n"
	Sep 17 00:36:05 functional-918451 dockerd[6835]: time="2025-09-17T00:36:05.462272697Z" level=info msg="Ignoring extra error returned from registry" error="unauthorized: authentication required"
	Sep 17 00:36:08 functional-918451 dockerd[6835]: time="2025-09-17T00:36:08.588935404Z" level=info msg="ignoring event" container=fc20c680b0c9a109d2a796a54605e3e59ed6b90dde8d4f74f695afd26aa55199 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Sep 17 00:36:18 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:18Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/226891f84e087d0a96cc1a6d0c27c29d4f73265b37779b2bd4dd2e06a7d72633/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:36:20 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:20Z" level=info msg="Stop pulling image docker.io/nginx:alpine: Status: Downloaded newer image for nginx:alpine"
	Sep 17 00:36:24 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:24Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/31202132ba2c12ccada28a19e38a217018a2a60349d74f877cad07c580f780ea/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:36:24 functional-918451 dockerd[6835]: time="2025-09-17T00:36:24.745281310Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:24 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:24Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
	Sep 17 00:36:27 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/6a646a03ba1282739d8818829d7ef6d68ed602a4898281a8cced3af6f6d4e127/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
	Sep 17 00:36:27 functional-918451 dockerd[6835]: time="2025-09-17T00:36:27.820598487Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:37 functional-918451 dockerd[6835]: time="2025-09-17T00:36:37.016993757Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:41 functional-918451 dockerd[6835]: time="2025-09-17T00:36:41.014364003Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:36:53 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:53Z" level=error msg="error getting RW layer size for container ID '17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c': Error response from daemon: No such container: 17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c"
	Sep 17 00:36:53 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:36:53Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17077358dad774a1532240e9047a072ab229af420b91e24fdde2fb6d25bdfe5c'"
	Sep 17 00:37:05 functional-918451 dockerd[6835]: time="2025-09-17T00:37:05.036223709Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:07 functional-918451 dockerd[6835]: time="2025-09-17T00:37:07.012067849Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:59 functional-918451 dockerd[6835]: time="2025-09-17T00:37:59.123624420Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:37:59 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:37:59Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
	Sep 17 00:38:00 functional-918451 dockerd[6835]: time="2025-09-17T00:38:00.037097732Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:25 functional-918451 dockerd[6835]: time="2025-09-17T00:39:25.034129019Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:31 functional-918451 dockerd[6835]: time="2025-09-17T00:39:31.549684585Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
	Sep 17 00:39:31 functional-918451 cri-dockerd[7576]: time="2025-09-17T00:39:31Z" level=info msg="Stop pulling image docker.io/nginx:latest: latest: Pulling from library/nginx"
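	The repeated "toomanyrequests" entries above show pulls of docker.io/nginx and kicbase/echo-server hitting Docker Hub's unauthenticated pull rate limit, which is what later drives the ImagePullBackOff errors in the kubelet log. Two common workarounds in a minikube environment are sketched below as assumptions only; neither was run as part of this test:

	    # Pre-load the image from the host so the node never pulls from Docker Hub:
	    minikube -p functional-918451 image load docker.io/nginx:latest
	    # Or log the node's Docker daemon into Docker Hub so pulls count against an
	    # authenticated account (the username is a placeholder, not from this report):
	    minikube -p functional-918451 ssh -- sudo docker login -u <dockerhub-user>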
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                           CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	7f05a20c84773       nginx@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8   4 minutes ago       Running             nginx                     0                   226891f84e087       nginx-svc
	bf9dde3577a17       138784d87c9c5                                                                   4 minutes ago       Running             coredns                   2                   5a0f0f9b9bec0       coredns-66bc5c9577-q6x4w
	2249e1b1919d7       6fc32d66c1411                                                                   4 minutes ago       Running             kube-proxy                3                   c70143b4207a5       kube-proxy-q4hcq
	cf11e0afbf55d       ba04bb24b9575                                                                   4 minutes ago       Running             storage-provisioner       3                   9bd802eae08c9       storage-provisioner
	8f45cd3852343       a25f5ef9c34c3                                                                   4 minutes ago       Running             kube-scheduler            3                   adb05901f064d       kube-scheduler-functional-918451
	810186cb500fd       d291939e99406                                                                   4 minutes ago       Running             kube-apiserver            0                   edfa6965041a5       kube-apiserver-functional-918451
	53d0462ffd2c6       a1894772a478e                                                                   4 minutes ago       Running             etcd                      2                   299a608c07fcf       etcd-functional-918451
	a800de00866e1       996be7e86d9b3                                                                   4 minutes ago       Running             kube-controller-manager   3                   85599d5f0bdd8       kube-controller-manager-functional-918451
	bc1e5b3192079       6fc32d66c1411                                                                   4 minutes ago       Created             kube-proxy                2                   3c0a229533b1f       kube-proxy-q4hcq
	7cda0e892061a       996be7e86d9b3                                                                   4 minutes ago       Created             kube-controller-manager   2                   7a2c5910368f6       kube-controller-manager-functional-918451
	e3a6d89ad5b64       a25f5ef9c34c3                                                                   4 minutes ago       Created             kube-scheduler            2                   ae4004a0e0f99       kube-scheduler-functional-918451
	e707505e3be85       ba04bb24b9575                                                                   4 minutes ago       Exited              storage-provisioner       2                   56ab6c32da135       storage-provisioner
	d3d4055f8ecd2       138784d87c9c5                                                                   5 minutes ago       Exited              coredns                   1                   ecc60cd02b8df       coredns-66bc5c9577-q6x4w
	bd4e50a8edbd5       a1894772a478e                                                                   5 minutes ago       Exited              etcd                      1                   e00e50d78d699       etcd-functional-918451
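	The table above resembles CRI container-listing output gathered from the node; a roughly comparable manual query (an assumption about how such a listing can be reproduced, not necessarily the command this report used) would be:

	    minikube -p functional-918451 ssh -- sudo crictl ps -a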
	
	
	==> coredns [bf9dde3577a1] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:40702 - 26535 "HINFO IN 1869121631954386126.875895371521758492. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.03198374s
	
	
	==> coredns [d3d4055f8ecd] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
	CoreDNS-1.12.1
	linux/arm64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:57340 - 21792 "HINFO IN 1884376929635518991.5843453519963972009. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.021793349s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	
	==> describe nodes <==
	Name:               functional-918451
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=functional-918451
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
	                    minikube.k8s.io/name=functional-918451
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_09_17T00_33_25_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Wed, 17 Sep 2025 00:33:22 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  functional-918451
	  AcquireTime:     <unset>
	  RenewTime:       Wed, 17 Sep 2025 00:40:24 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Wed, 17 Sep 2025 00:36:50 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Wed, 17 Sep 2025 00:36:50 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Wed, 17 Sep 2025 00:36:50 +0000   Wed, 17 Sep 2025 00:33:18 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Wed, 17 Sep 2025 00:36:50 +0000   Wed, 17 Sep 2025 00:33:22 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.49.2
	  Hostname:    functional-918451
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022300Ki
	  pods:               110
	System Info:
	  Machine ID:                 787c0d47a41949608af3603ec5366447
	  System UUID:                f83af58d-c48d-4abe-ba83-7d4398f15ffc
	  Boot ID:                    54a40c62-e2ca-4fe1-8de3-5249514e3fbf
	  Kernel Version:             5.15.0-1084-aws
	  OS Image:                   Ubuntu 22.04.5 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://28.4.0
	  Kubelet Version:            v1.34.0
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (10 in total)
	  Namespace                   Name                                         CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                         ------------  ----------  ---------------  -------------  ---
	  default                     hello-node-connect-7d85dfc575-t4gsf          0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m58s
	  default                     nginx-svc                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m8s
	  default                     sp-pod                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m2s
	  kube-system                 coredns-66bc5c9577-q6x4w                     100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     6m55s
	  kube-system                 etcd-functional-918451                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         7m
	  kube-system                 kube-apiserver-functional-918451             250m (12%)    0 (0%)      0 (0%)           0 (0%)         4m36s
	  kube-system                 kube-controller-manager-functional-918451    200m (10%)    0 (0%)      0 (0%)           0 (0%)         7m
	  kube-system                 kube-proxy-q4hcq                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m55s
	  kube-system                 kube-scheduler-functional-918451             100m (5%)     0 (0%)      0 (0%)           0 (0%)         7m
	  kube-system                 storage-provisioner                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m54s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                750m (37%)  0 (0%)
	  memory             170Mi (2%)  170Mi (2%)
	  ephemeral-storage  0 (0%)      0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type     Reason                   Age                    From             Message
	  ----     ------                   ----                   ----             -------
	  Normal   Starting                 6m53s                  kube-proxy       
	  Normal   Starting                 4m34s                  kube-proxy       
	  Normal   Starting                 5m37s                  kube-proxy       
	  Normal   Starting                 7m8s                   kubelet          Starting kubelet.
	  Warning  CgroupV1                 7m8s                   kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  7m8s (x8 over 7m8s)    kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    7m8s (x8 over 7m8s)    kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     7m8s (x7 over 7m8s)    kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  7m8s                   kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 7m1s                   kubelet          Starting kubelet.
	  Warning  CgroupV1                 7m1s                   kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  7m                     kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    7m                     kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     7m                     kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  7m                     kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           6m56s                  node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   NodeNotReady             5m49s                  kubelet          Node functional-918451 status is now: NodeNotReady
	  Normal   RegisteredNode           5m35s                  node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
	  Normal   Starting                 4m42s                  kubelet          Starting kubelet.
	  Warning  CgroupV1                 4m42s                  kubelet          cgroup v1 support is in maintenance mode, please migrate to cgroup v2
	  Normal   NodeHasSufficientMemory  4m42s (x8 over 4m42s)  kubelet          Node functional-918451 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    4m42s (x8 over 4m42s)  kubelet          Node functional-918451 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     4m42s (x7 over 4m42s)  kubelet          Node functional-918451 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  4m42s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   RegisteredNode           4m33s                  node-controller  Node functional-918451 event: Registered Node functional-918451 in Controller
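	The NodePressure verification earlier in the log (00:35:52.407) reads the same node conditions shown above. One way to re-read them by hand, assuming the same kubectl context, is:

	    kubectl --context functional-918451 get node functional-918451 \
	      -o jsonpath='{range .status.conditions[*]}{.type}={.status}{"\n"}{end}'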
	
	
	==> dmesg <==
	[Sep16 22:47] kauditd_printk_skb: 8 callbacks suppressed
	[Sep17 00:20] kauditd_printk_skb: 8 callbacks suppressed
	
	
	==> etcd [53d0462ffd2c] <==
	{"level":"warn","ts":"2025-09-17T00:35:48.079717Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41210","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.100123Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41240","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.117439Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41258","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.148362Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41280","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.161304Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41300","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.176646Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41320","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.193870Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41336","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.212630Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41368","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.231571Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41384","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.251324Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41398","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.266264Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41428","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.283844Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41438","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.305246Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41470","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.324766Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41492","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.338961Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41522","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.366501Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41540","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.407986Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41556","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.435057Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41568","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.452382Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41588","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.477192Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41616","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.510691Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41636","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.541591Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41670","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.545272Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41650","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:35:48.604666Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41692","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:39:31.191908Z","caller":"traceutil/trace.go:172","msg":"trace[793044705] transaction","detail":"{read_only:false; response_revision:1056; number_of_response:1; }","duration":"143.194218ms","start":"2025-09-17T00:39:31.048697Z","end":"2025-09-17T00:39:31.191891Z","steps":["trace[793044705] 'process raft request'  (duration: 143.096128ms)"],"step_count":1}
	
	
	==> etcd [bd4e50a8edbd] <==
	{"level":"warn","ts":"2025-09-17T00:34:46.313001Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49074","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.328657Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49100","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.352785Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49118","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.380490Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49136","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.400768Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49158","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.419290Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49186","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-09-17T00:34:46.492683Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49234","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-09-17T00:35:27.923747Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
	{"level":"info","ts":"2025-09-17T00:35:27.923816Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	{"level":"error","ts":"2025-09-17T00:35:27.923929Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926453Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-09-17T00:35:34.926573Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.926596Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"aec36adc501070cc","current-leader-member-id":"aec36adc501070cc"}
	{"level":"info","ts":"2025-09-17T00:35:34.926693Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
	{"level":"info","ts":"2025-09-17T00:35:34.926711Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927930Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.927988Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.927997Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928037Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-09-17T00:35:34.928052Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
	{"level":"error","ts":"2025-09-17T00:35:34.928059Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932196Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"error","ts":"2025-09-17T00:35:34.932268Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.49.2:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-09-17T00:35:34.932295Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
	{"level":"info","ts":"2025-09-17T00:35:34.932302Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-918451","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
	
	
	==> kernel <==
	 00:40:25 up  3:22,  0 users,  load average: 0.47, 1.08, 2.17
	Linux functional-918451 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.5 LTS"
	
	
	==> kube-apiserver [810186cb500f] <==
	I0917 00:35:49.352574       1 shared_informer.go:356] "Caches are synced" controller="node_authorizer"
	I0917 00:35:49.363350       1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister"
	I0917 00:35:49.363425       1 aggregator.go:171] initial CRD sync complete...
	I0917 00:35:49.363434       1 autoregister_controller.go:144] Starting autoregister controller
	I0917 00:35:49.363447       1 cache.go:32] Waiting for caches to sync for autoregister controller
	I0917 00:35:49.363453       1 cache.go:39] Caches are synced for autoregister controller
	I0917 00:35:49.380819       1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12
	I0917 00:35:49.884602       1 controller.go:667] quota admission added evaluator for: serviceaccounts
	I0917 00:35:50.158298       1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
	I0917 00:35:51.057577       1 controller.go:667] quota admission added evaluator for: deployments.apps
	I0917 00:35:51.110713       1 controller.go:667] quota admission added evaluator for: daemonsets.apps
	I0917 00:35:51.146495       1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I0917 00:35:51.154903       1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	I0917 00:35:52.907894       1 controller.go:667] quota admission added evaluator for: replicasets.apps
	I0917 00:35:52.957781       1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I0917 00:35:53.007854       1 controller.go:667] quota admission added evaluator for: endpoints
	I0917 00:36:04.773604       1 alloc.go:328] "allocated clusterIPs" service="default/invalid-svc" clusterIPs={"IPv4":"10.101.248.159"}
	I0917 00:36:17.589382       1 alloc.go:328] "allocated clusterIPs" service="default/nginx-svc" clusterIPs={"IPv4":"10.111.232.235"}
	I0917 00:36:27.137014       1 alloc.go:328] "allocated clusterIPs" service="default/hello-node-connect" clusterIPs={"IPv4":"10.102.46.233"}
	I0917 00:36:48.326669       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:36:51.406481       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:38:01.898138       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:38:06.934436       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:39:05.611504       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	I0917 00:39:11.198033       1 stats.go:136] "Error getting keys" err="empty key: \"\""
	
	
	==> kube-controller-manager [7cda0e892061] <==
	
	
	==> kube-controller-manager [a800de00866e] <==
	I0917 00:35:52.659077       1 shared_informer.go:356] "Caches are synced" controller="node"
	I0917 00:35:52.659241       1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
	I0917 00:35:52.659372       1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
	I0917 00:35:52.659462       1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
	I0917 00:35:52.659561       1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
	I0917 00:35:52.663056       1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
	I0917 00:35:52.664386       1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
	I0917 00:35:52.665624       1 shared_informer.go:356] "Caches are synced" controller="expand"
	I0917 00:35:52.665868       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
	I0917 00:35:52.668115       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I0917 00:35:52.668389       1 shared_informer.go:356] "Caches are synced" controller="GC"
	I0917 00:35:52.670522       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I0917 00:35:52.672883       1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
	I0917 00:35:52.691346       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I0917 00:35:52.694479       1 shared_informer.go:356] "Caches are synced" controller="endpoint"
	I0917 00:35:52.694480       1 shared_informer.go:356] "Caches are synced" controller="namespace"
	I0917 00:35:52.696661       1 shared_informer.go:356] "Caches are synced" controller="deployment"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
	I0917 00:35:52.700607       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
	I0917 00:35:52.700739       1 shared_informer.go:356] "Caches are synced" controller="disruption"
	I0917 00:35:52.701699       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
	I0917 00:35:52.701707       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrapproving"
	I0917 00:35:52.701747       1 shared_informer.go:356] "Caches are synced" controller="stateful set"
	I0917 00:35:52.702840       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
	I0917 00:35:52.735080       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	
	
	==> kube-proxy [2249e1b1919d] <==
	I0917 00:35:50.820804       1 server_linux.go:53] "Using iptables proxy"
	I0917 00:35:50.918756       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I0917 00:35:51.019889       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I0917 00:35:51.019925       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
	E0917 00:35:51.020010       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I0917 00:35:51.049955       1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
	I0917 00:35:51.050178       1 server_linux.go:132] "Using iptables Proxier"
	I0917 00:35:51.058525       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I0917 00:35:51.058826       1 server.go:527] "Version info" version="v1.34.0"
	I0917 00:35:51.058853       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:51.060083       1 config.go:200] "Starting service config controller"
	I0917 00:35:51.060215       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I0917 00:35:51.065750       1 config.go:106] "Starting endpoint slice config controller"
	I0917 00:35:51.065775       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I0917 00:35:51.065792       1 config.go:403] "Starting serviceCIDR config controller"
	I0917 00:35:51.065797       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I0917 00:35:51.066494       1 config.go:309] "Starting node config controller"
	I0917 00:35:51.066513       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I0917 00:35:51.066520       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I0917 00:35:51.161226       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I0917 00:35:51.166495       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I0917 00:35:51.166526       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	
	
	==> kube-proxy [bc1e5b319207] <==
	
	
	==> kube-scheduler [8f45cd385234] <==
	I0917 00:35:46.968566       1 serving.go:386] Generated self-signed cert in-memory
	W0917 00:35:49.218994       1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0917 00:35:49.219031       1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0917 00:35:49.219043       1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0917 00:35:49.219050       1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0917 00:35:49.292960       1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.0"
	I0917 00:35:49.292997       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I0917 00:35:49.297061       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297101       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I0917 00:35:49.297962       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I0917 00:35:49.299271       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	I0917 00:35:49.398254       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kube-scheduler [e3a6d89ad5b6] <==
	
	
	==> kubelet <==
	Sep 17 00:38:26 functional-918451 kubelet[8557]: E0917 00:38:26.808299    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:38:37 functional-918451 kubelet[8557]: E0917 00:38:37.807980    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:38:40 functional-918451 kubelet[8557]: E0917 00:38:40.808002    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:38:50 functional-918451 kubelet[8557]: E0917 00:38:50.807769    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:38:51 functional-918451 kubelet[8557]: E0917 00:38:51.807970    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:39:02 functional-918451 kubelet[8557]: E0917 00:39:02.808383    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:39:03 functional-918451 kubelet[8557]: E0917 00:39:03.808249    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:39:13 functional-918451 kubelet[8557]: E0917 00:39:13.807997    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:39:17 functional-918451 kubelet[8557]: E0917 00:39:17.808317    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:39:25 functional-918451 kubelet[8557]: E0917 00:39:25.037923    8557 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:39:25 functional-918451 kubelet[8557]: E0917 00:39:25.037982    8557 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="kicbase/echo-server:latest"
	Sep 17 00:39:25 functional-918451 kubelet[8557]: E0917 00:39:25.038054    8557 kuberuntime_manager.go:1449] "Unhandled Error" err="container echo-server start failed in pod hello-node-connect-7d85dfc575-t4gsf_default(f049199c-f82f-43ba-b926-425bd104b855): ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:39:25 functional-918451 kubelet[8557]: E0917 00:39:25.038090    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ErrImagePull: \"Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:39:31 functional-918451 kubelet[8557]: E0917 00:39:31.556614    8557 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/nginx:latest"
	Sep 17 00:39:31 functional-918451 kubelet[8557]: E0917 00:39:31.556667    8557 kuberuntime_image.go:43] "Failed to pull image" err="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/nginx:latest"
	Sep 17 00:39:31 functional-918451 kubelet[8557]: E0917 00:39:31.556742    8557 kuberuntime_manager.go:1449] "Unhandled Error" err="container myfrontend start failed in pod sp-pod_default(1d4fda37-50c1-41c2-8b81-a48f9ab03c3d): ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
	Sep 17 00:39:31 functional-918451 kubelet[8557]: E0917 00:39:31.556773    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ErrImagePull: \"toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:39:35 functional-918451 kubelet[8557]: E0917 00:39:35.808377    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:39:43 functional-918451 kubelet[8557]: E0917 00:39:43.808803    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:39:49 functional-918451 kubelet[8557]: E0917 00:39:49.813961    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:39:56 functional-918451 kubelet[8557]: E0917 00:39:56.809146    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:40:01 functional-918451 kubelet[8557]: E0917 00:40:01.808714    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:40:08 functional-918451 kubelet[8557]: E0917 00:40:08.808614    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	Sep 17 00:40:15 functional-918451 kubelet[8557]: E0917 00:40:15.808267    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"echo-server\" with ImagePullBackOff: \"Back-off pulling image \\\"kicbase/echo-server\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-node-connect-7d85dfc575-t4gsf" podUID="f049199c-f82f-43ba-b926-425bd104b855"
	Sep 17 00:40:22 functional-918451 kubelet[8557]: E0917 00:40:22.807624    8557 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"myfrontend\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/nginx\\\": ErrImagePull: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/sp-pod" podUID="1d4fda37-50c1-41c2-8b81-a48f9ab03c3d"
	
	
	==> storage-provisioner [cf11e0afbf55] <==
	W0917 00:40:01.357430       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:03.360946       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:03.365057       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:05.368792       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:05.373166       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:07.376738       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:07.380653       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:09.384306       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:09.388604       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:11.391883       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:11.396911       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:13.400907       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:13.405131       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:15.407969       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:15.413507       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:17.416256       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:17.421170       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:19.424677       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:19.428496       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:21.431443       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:21.435284       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:23.438826       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:23.443322       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:25.446918       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W0917 00:40:25.451229       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	
	
	==> storage-provisioner [e707505e3be8] <==
	I0917 00:35:40.392782       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F0917 00:35:40.397573       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
	

                                                
                                                
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
helpers_test.go:269: (dbg) Run:  kubectl --context functional-918451 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: hello-node-connect-7d85dfc575-t4gsf sp-pod
helpers_test.go:282: ======> post-mortem[TestFunctional/parallel/PersistentVolumeClaim]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context functional-918451 describe pod hello-node-connect-7d85dfc575-t4gsf sp-pod
helpers_test.go:290: (dbg) kubectl --context functional-918451 describe pod hello-node-connect-7d85dfc575-t4gsf sp-pod:

                                                
                                                
-- stdout --
	Name:             hello-node-connect-7d85dfc575-t4gsf
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:27 +0000
	Labels:           app=hello-node-connect
	                  pod-template-hash=7d85dfc575
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.9
	IPs:
	  IP:           10.244.0.9
	Controlled By:  ReplicaSet/hello-node-connect-7d85dfc575
	Containers:
	  echo-server:
	    Container ID:   
	    Image:          kicbase/echo-server
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kpg27 (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  kube-api-access-kpg27:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                   From               Message
	  ----     ------     ----                  ----               -------
	  Normal   Scheduled  3m59s                 default-scheduler  Successfully assigned default/hello-node-connect-7d85dfc575-t4gsf to functional-918451
	  Normal   Pulling    62s (x5 over 3m59s)   kubelet            Pulling image "kicbase/echo-server"
	  Warning  Failed     61s (x5 over 3m59s)   kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     61s (x5 over 3m59s)   kubelet            Error: ErrImagePull
	  Normal   BackOff    11s (x15 over 3m58s)  kubelet            Back-off pulling image "kicbase/echo-server"
	  Warning  Failed     11s (x15 over 3m58s)  kubelet            Error: ImagePullBackOff
	
	
	Name:             sp-pod
	Namespace:        default
	Priority:         0
	Service Account:  default
	Node:             functional-918451/192.168.49.2
	Start Time:       Wed, 17 Sep 2025 00:36:23 +0000
	Labels:           test=storage-provisioner
	Annotations:      <none>
	Status:           Pending
	IP:               10.244.0.8
	IPs:
	  IP:  10.244.0.8
	Containers:
	  myfrontend:
	    Container ID:   
	    Image:          docker.io/nginx
	    Image ID:       
	    Port:           <none>
	    Host Port:      <none>
	    State:          Waiting
	      Reason:       ImagePullBackOff
	    Ready:          False
	    Restart Count:  0
	    Environment:    <none>
	    Mounts:
	      /tmp/mount from mypd (rw)
	      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rrpzl (ro)
	Conditions:
	  Type                        Status
	  PodReadyToStartContainers   True 
	  Initialized                 True 
	  Ready                       False 
	  ContainersReady             False 
	  PodScheduled                True 
	Volumes:
	  mypd:
	    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
	    ClaimName:  myclaim
	    ReadOnly:   false
	  kube-api-access-rrpzl:
	    Type:                    Projected (a volume that contains injected data from multiple sources)
	    TokenExpirationSeconds:  3607
	    ConfigMapName:           kube-root-ca.crt
	    Optional:                false
	    DownwardAPI:             true
	QoS Class:                   BestEffort
	Node-Selectors:              <none>
	Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
	                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
	Events:
	  Type     Reason     Age                    From               Message
	  ----     ------     ----                   ----               -------
	  Normal   Scheduled  4m3s                   default-scheduler  Successfully assigned default/sp-pod to functional-918451
	  Warning  Failed     3m21s (x2 over 3m49s)  kubelet            Failed to pull image "docker.io/nginx": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Normal   Pulling    56s (x5 over 4m2s)     kubelet            Pulling image "docker.io/nginx"
	  Warning  Failed     55s (x3 over 4m2s)     kubelet            Failed to pull image "docker.io/nginx": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
	  Warning  Failed     55s (x5 over 4m2s)     kubelet            Error: ErrImagePull
	  Normal   BackOff    4s (x15 over 4m2s)     kubelet            Back-off pulling image "docker.io/nginx"
	  Warning  Failed     4s (x15 over 4m2s)     kubelet            Error: ImagePullBackOff

                                                
                                                
-- /stdout --
helpers_test.go:293: <<< TestFunctional/parallel/PersistentVolumeClaim FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestFunctional/parallel/PersistentVolumeClaim (249.01s)
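Both pods in the describe output above are healthy at the scheduling and volume layers (the myclaim PVC is bound and mounted at /tmp/mount); they stay Pending only because unauthenticated pulls of docker.io/nginx and kicbase/echo-server hit Docker Hub's toomanyrequests rate limit. A minimal sketch of one possible workaround, assuming the profile and binary names shown in the logs above; pre-loading the images means the kubelet never has to pull from Docker Hub at test time:

    # Sketch only (not part of the test harness): pull the images once on the host,
    # then load them into the functional-918451 profile so in-cluster pulls are unnecessary.
    docker pull docker.io/nginx:latest
    docker pull kicbase/echo-server:latest
    out/minikube-linux-arm64 -p functional-918451 image load docker.io/nginx:latest
    out/minikube-linux-arm64 -p functional-918451 image load kicbase/echo-server:latest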

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/DeployApp (600.74s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1451: (dbg) Run:  kubectl --context functional-918451 create deployment hello-node --image kicbase/echo-server
functional_test.go:1455: (dbg) Run:  kubectl --context functional-918451 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1460: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:352: "hello-node-75c85bcc94-nttx8" [085d0fa5-d5c9-4d9a-ad8a-e78f727c62bd] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server])
E0917 00:43:52.991586  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:337: TestFunctional/parallel/ServiceCmd/DeployApp: WARNING: pod list for "default" "app=hello-node" returned: client rate limiter Wait returned an error: context deadline exceeded
functional_test.go:1460: ***** TestFunctional/parallel/ServiceCmd/DeployApp: pod "app=hello-node" failed to start within 10m0s: context deadline exceeded ****
functional_test.go:1460: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p functional-918451 -n functional-918451
functional_test.go:1460: TestFunctional/parallel/ServiceCmd/DeployApp: showing logs for failed pods as of 2025-09-17 00:50:26.978792237 +0000 UTC m=+1761.389418227
functional_test.go:1460: (dbg) Run:  kubectl --context functional-918451 describe po hello-node-75c85bcc94-nttx8 -n default
functional_test.go:1460: (dbg) kubectl --context functional-918451 describe po hello-node-75c85bcc94-nttx8 -n default:
Name:             hello-node-75c85bcc94-nttx8
Namespace:        default
Priority:         0
Service Account:  default
Node:             functional-918451/192.168.49.2
Start Time:       Wed, 17 Sep 2025 00:40:26 +0000
Labels:           app=hello-node
pod-template-hash=75c85bcc94
Annotations:      <none>
Status:           Pending
IP:               10.244.0.10
IPs:
IP:           10.244.0.10
Controlled By:  ReplicaSet/hello-node-75c85bcc94
Containers:
echo-server:
Container ID:   
Image:          kicbase/echo-server
Image ID:       
Port:           <none>
Host Port:      <none>
State:          Waiting
Reason:       ImagePullBackOff
Ready:          False
Restart Count:  0
Environment:    <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-949tz (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   True 
Initialized                 True 
Ready                       False 
ContainersReady             False 
PodScheduled                True 
Volumes:
kube-api-access-949tz:
Type:                    Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds:  3607
ConfigMapName:           kube-root-ca.crt
Optional:                false
DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                     From               Message
----     ------     ----                    ----               -------
Normal   Scheduled  10m                     default-scheduler  Successfully assigned default/hello-node-75c85bcc94-nttx8 to functional-918451
Warning  Failed     9m20s (x3 over 10m)     kubelet            Failed to pull image "kicbase/echo-server": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Normal   Pulling    6m55s (x5 over 10m)     kubelet            Pulling image "kicbase/echo-server"
Warning  Failed     6m54s (x5 over 10m)     kubelet            Error: ErrImagePull
Warning  Failed     6m54s (x2 over 8m28s)   kubelet            Failed to pull image "kicbase/echo-server": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
Warning  Failed     4m49s (x20 over 9m59s)  kubelet            Error: ImagePullBackOff
Normal   BackOff    4m34s (x21 over 9m59s)  kubelet            Back-off pulling image "kicbase/echo-server"
functional_test.go:1460: (dbg) Run:  kubectl --context functional-918451 logs hello-node-75c85bcc94-nttx8 -n default
functional_test.go:1460: (dbg) Non-zero exit: kubectl --context functional-918451 logs hello-node-75c85bcc94-nttx8 -n default: exit status 1 (97.314235ms)

                                                
                                                
** stderr ** 
	Error from server (BadRequest): container "echo-server" in pod "hello-node-75c85bcc94-nttx8" is waiting to start: trying and failing to pull image

                                                
                                                
** /stderr **
functional_test.go:1460: kubectl --context functional-918451 logs hello-node-75c85bcc94-nttx8 -n default: exit status 1
functional_test.go:1461: failed waiting for hello-node pod: app=hello-node within 10m0s: context deadline exceeded
--- FAIL: TestFunctional/parallel/ServiceCmd/DeployApp (600.74s)
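The deployment and its ReplicaSet are created successfully by the two kubectl commands at functional_test.go:1451 and :1455; the failure is that the single replica never leaves ImagePullBackOff (same rate-limit cause as above), so the 10m0s wait for app=hello-node pods expires. A short sketch, assuming the same context name, of standard kubectl checks that would surface this state directly:

    # Standard kubectl queries (not part of the harness) against the same context.
    kubectl --context functional-918451 rollout status deployment/hello-node --timeout=60s
    kubectl --context functional-918451 get pods -l app=hello-node -o wide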

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/HTTPS (0.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1519: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 service --namespace=default --https --url hello-node
functional_test.go:1519: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 service --namespace=default --https --url hello-node: exit status 115 (420.851915ms)

                                                
                                                
-- stdout --
	https://192.168.49.2:32078
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service hello-node found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_3af0dd3f106bd0c134df3d834cbdbb288a06d35d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:1521: failed to get service url. args "out/minikube-linux-arm64 -p functional-918451 service --namespace=default --https --url hello-node" : exit status 115
--- FAIL: TestFunctional/parallel/ServiceCmd/HTTPS (0.42s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/Format (0.42s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1550: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 service hello-node --url --format={{.IP}}
functional_test.go:1550: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 service hello-node --url --format={{.IP}}: exit status 115 (418.240081ms)

                                                
                                                
-- stdout --
	192.168.49.2
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service hello-node found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_7cc4328ee572bf2be3730700e5bda4ff5ee9066f_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:1552: failed to get service url with custom format. args "out/minikube-linux-arm64 -p functional-918451 service hello-node --url --format={{.IP}}": exit status 115
--- FAIL: TestFunctional/parallel/ServiceCmd/Format (0.42s)

                                                
                                    
x
+
TestFunctional/parallel/ServiceCmd/URL (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1569: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 service hello-node --url
functional_test.go:1569: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 service hello-node --url: exit status 115 (407.613834ms)

                                                
                                                
-- stdout --
	http://192.168.49.2:32078
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service hello-node found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_7cc4328ee572bf2be3730700e5bda4ff5ee9066f_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:1571: failed to get service url. args: "out/minikube-linux-arm64 -p functional-918451 service hello-node --url": exit status 115
functional_test.go:1575: found endpoint for hello-node: http://192.168.49.2:32078
--- FAIL: TestFunctional/parallel/ServiceCmd/URL (0.41s)
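HTTPS, Format, and URL are downstream casualties of the DeployApp failure: the hello-node Service exists and resolves to NodePort 32078 on 192.168.49.2, but it has no ready endpoints because no backing pod is Running, so minikube exits with SVC_UNREACHABLE. A minimal sketch (standard kubectl, same assumed context) of how that can be confirmed:

    # The Service is present, but its endpoints stay empty while the pod is in ImagePullBackOff.
    kubectl --context functional-918451 get service hello-node -o wide
    kubectl --context functional-918451 get endpoints hello-node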

                                                
                                    

Test pass (313/347)

Order  Passed test  Duration (s)
3 TestDownloadOnly/v1.28.0/json-events 6.97
4 TestDownloadOnly/v1.28.0/preload-exists 0
8 TestDownloadOnly/v1.28.0/LogsDuration 0.09
9 TestDownloadOnly/v1.28.0/DeleteAll 0.21
10 TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds 0.14
12 TestDownloadOnly/v1.34.0/json-events 6.56
13 TestDownloadOnly/v1.34.0/preload-exists 0
17 TestDownloadOnly/v1.34.0/LogsDuration 0.09
18 TestDownloadOnly/v1.34.0/DeleteAll 0.22
19 TestDownloadOnly/v1.34.0/DeleteAlwaysSucceeds 0.15
21 TestBinaryMirror 0.59
22 TestOffline 54.39
25 TestAddons/PreSetup/EnablingAddonOnNonExistingCluster 0.07
26 TestAddons/PreSetup/DisablingAddonOnNonExistingCluster 0.07
27 TestAddons/Setup 151.43
29 TestAddons/serial/Volcano 41.14
31 TestAddons/serial/GCPAuth/Namespaces 0.18
32 TestAddons/serial/GCPAuth/FakeCredentials 9.99
35 TestAddons/parallel/Registry 16.53
36 TestAddons/parallel/RegistryCreds 0.79
37 TestAddons/parallel/Ingress 22.41
38 TestAddons/parallel/InspektorGadget 5.24
39 TestAddons/parallel/MetricsServer 5.77
41 TestAddons/parallel/CSI 61.96
42 TestAddons/parallel/Headlamp 18.8
43 TestAddons/parallel/CloudSpanner 6.57
45 TestAddons/parallel/NvidiaDevicePlugin 6.5
46 TestAddons/parallel/Yakd 10.88
48 TestAddons/StoppedEnableDisable 11.38
49 TestCertOptions 41.49
50 TestCertExpiration 267.46
51 TestDockerFlags 47.16
52 TestForceSystemdFlag 50.75
53 TestForceSystemdEnv 38.31
59 TestErrorSpam/setup 33.15
60 TestErrorSpam/start 0.8
61 TestErrorSpam/status 1.04
62 TestErrorSpam/pause 1.41
63 TestErrorSpam/unpause 1.47
64 TestErrorSpam/stop 11.01
67 TestFunctional/serial/CopySyncFile 0
68 TestFunctional/serial/StartWithProxy 71.93
69 TestFunctional/serial/AuditLog 0
70 TestFunctional/serial/SoftStart 56.06
71 TestFunctional/serial/KubeContext 0.06
72 TestFunctional/serial/KubectlGetPods 0.09
75 TestFunctional/serial/CacheCmd/cache/add_remote 2.91
76 TestFunctional/serial/CacheCmd/cache/add_local 1.04
77 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.06
78 TestFunctional/serial/CacheCmd/cache/list 0.05
79 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.32
80 TestFunctional/serial/CacheCmd/cache/cache_reload 1.68
81 TestFunctional/serial/CacheCmd/cache/delete 0.11
82 TestFunctional/serial/MinikubeKubectlCmd 0.13
83 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.14
84 TestFunctional/serial/ExtraConfig 52.99
85 TestFunctional/serial/ComponentHealth 0.11
86 TestFunctional/serial/LogsCmd 1.29
87 TestFunctional/serial/LogsFileCmd 1.31
88 TestFunctional/serial/InvalidService 4.57
90 TestFunctional/parallel/ConfigCmd 0.48
92 TestFunctional/parallel/DryRun 0.44
93 TestFunctional/parallel/InternationalLanguage 0.21
94 TestFunctional/parallel/StatusCmd 1.09
99 TestFunctional/parallel/AddonsCmd 0.14
102 TestFunctional/parallel/SSHCmd 0.81
103 TestFunctional/parallel/CpCmd 1.98
105 TestFunctional/parallel/FileSync 0.41
106 TestFunctional/parallel/CertSync 2.18
110 TestFunctional/parallel/NodeLabels 0.13
112 TestFunctional/parallel/NonActiveRuntimeDisabled 0.35
114 TestFunctional/parallel/License 0.41
115 TestFunctional/parallel/Version/short 0.05
116 TestFunctional/parallel/Version/components 1.01
117 TestFunctional/parallel/ImageCommands/ImageListShort 0.21
118 TestFunctional/parallel/ImageCommands/ImageListTable 0.21
119 TestFunctional/parallel/ImageCommands/ImageListJson 0.21
120 TestFunctional/parallel/ImageCommands/ImageListYaml 0.22
121 TestFunctional/parallel/ImageCommands/ImageBuild 3.51
122 TestFunctional/parallel/ImageCommands/Setup 0.69
123 TestFunctional/parallel/ImageCommands/ImageLoadDaemon 1.2
124 TestFunctional/parallel/DockerEnv/bash 1.23
125 TestFunctional/parallel/ImageCommands/ImageReloadDaemon 0.93
126 TestFunctional/parallel/UpdateContextCmd/no_changes 0.17
127 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.14
128 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.15
129 TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon 1.25
130 TestFunctional/parallel/ImageCommands/ImageSaveToFile 0.46
131 TestFunctional/parallel/ImageCommands/ImageRemove 0.54
132 TestFunctional/parallel/ProfileCmd/profile_not_create 0.52
133 TestFunctional/parallel/ImageCommands/ImageLoadFromFile 0.78
134 TestFunctional/parallel/ProfileCmd/profile_list 0.54
135 TestFunctional/parallel/ProfileCmd/profile_json_output 0.52
136 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 0.44
138 TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel 0.62
139 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0
141 TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup 9.35
142 TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP 0.09
143 TestFunctional/parallel/TunnelCmd/serial/AccessDirect 0
147 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.11
149 TestFunctional/parallel/MountCmd/any-port 7.64
150 TestFunctional/parallel/MountCmd/specific-port 1.97
151 TestFunctional/parallel/MountCmd/VerifyCleanup 1.95
152 TestFunctional/parallel/ServiceCmd/List 1.31
153 TestFunctional/parallel/ServiceCmd/JSONOutput 1.33
157 TestFunctional/delete_echo-server_images 0.04
158 TestFunctional/delete_my-image_image 0.02
159 TestFunctional/delete_minikube_cached_images 0.02
164 TestMultiControlPlane/serial/StartCluster 136.91
165 TestMultiControlPlane/serial/DeployApp 40.11
166 TestMultiControlPlane/serial/PingHostFromPods 1.78
167 TestMultiControlPlane/serial/AddWorkerNode 19.57
168 TestMultiControlPlane/serial/NodeLabels 0.17
169 TestMultiControlPlane/serial/HAppyAfterClusterStart 1.3
170 TestMultiControlPlane/serial/CopyFile 20.37
171 TestMultiControlPlane/serial/StopSecondaryNode 11.75
172 TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop 0.81
173 TestMultiControlPlane/serial/RestartSecondaryNode 45.24
174 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart 1.12
175 TestMultiControlPlane/serial/RestartClusterKeepsNodes 205.13
176 TestMultiControlPlane/serial/DeleteSecondaryNode 11.83
177 TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete 0.8
178 TestMultiControlPlane/serial/StopCluster 32.83
179 TestMultiControlPlane/serial/RestartCluster 108.93
180 TestMultiControlPlane/serial/DegradedAfterClusterRestart 0.82
181 TestMultiControlPlane/serial/AddSecondaryNode 52.34
182 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd 1.4
185 TestImageBuild/serial/Setup 32.38
186 TestImageBuild/serial/NormalBuild 1.78
187 TestImageBuild/serial/BuildWithBuildArg 0.96
188 TestImageBuild/serial/BuildWithDockerIgnore 0.9
189 TestImageBuild/serial/BuildWithSpecifiedDockerfile 0.91
193 TestJSONOutput/start/Command 77.92
194 TestJSONOutput/start/Audit 0
196 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
197 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
199 TestJSONOutput/pause/Command 0.61
200 TestJSONOutput/pause/Audit 0
202 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
203 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
205 TestJSONOutput/unpause/Command 0.52
206 TestJSONOutput/unpause/Audit 0
208 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
209 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
211 TestJSONOutput/stop/Command 5.79
212 TestJSONOutput/stop/Audit 0
214 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
215 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
216 TestErrorJSONOutput 0.23
218 TestKicCustomNetwork/create_custom_network 33.53
219 TestKicCustomNetwork/use_default_bridge_network 33.88
220 TestKicExistingNetwork 34.95
221 TestKicCustomSubnet 34.44
222 TestKicStaticIP 30.68
223 TestMainNoArgs 0.05
224 TestMinikubeProfile 72.25
227 TestMountStart/serial/StartWithMountFirst 7.67
228 TestMountStart/serial/VerifyMountFirst 0.26
229 TestMountStart/serial/StartWithMountSecond 8.04
230 TestMountStart/serial/VerifyMountSecond 0.27
231 TestMountStart/serial/DeleteFirst 1.45
232 TestMountStart/serial/VerifyMountPostDelete 0.28
233 TestMountStart/serial/Stop 1.19
234 TestMountStart/serial/RestartStopped 8.64
235 TestMountStart/serial/VerifyMountPostStop 0.27
238 TestMultiNode/serial/FreshStart2Nodes 72.88
239 TestMultiNode/serial/DeployApp2Nodes 47.02
240 TestMultiNode/serial/PingHostFrom2Pods 0.98
241 TestMultiNode/serial/AddNode 14.92
242 TestMultiNode/serial/MultiNodeLabels 0.1
243 TestMultiNode/serial/ProfileList 0.86
244 TestMultiNode/serial/CopyFile 10.93
245 TestMultiNode/serial/StopNode 2.26
246 TestMultiNode/serial/StartAfterStop 9.3
247 TestMultiNode/serial/RestartKeepsNodes 75.93
248 TestMultiNode/serial/DeleteNode 5.75
249 TestMultiNode/serial/StopMultiNode 21.61
250 TestMultiNode/serial/RestartMultiNode 52.54
251 TestMultiNode/serial/ValidateNameConflict 38.89
256 TestPreload 156.91
258 TestScheduledStopUnix 107.05
259 TestSkaffold 137.48
261 TestInsufficientStorage 10.92
262 TestRunningBinaryUpgrade 74.27
264 TestKubernetesUpgrade 378.93
265 TestMissingContainerUpgrade 91.34
267 TestNoKubernetes/serial/StartNoK8sWithVersion 0.11
268 TestNoKubernetes/serial/StartWithK8s 42.53
269 TestNoKubernetes/serial/StartWithStopK8s 20.44
281 TestNoKubernetes/serial/Start 9.05
282 TestNoKubernetes/serial/VerifyK8sNotRunning 0.32
283 TestNoKubernetes/serial/ProfileList 1.23
284 TestNoKubernetes/serial/Stop 1.26
285 TestNoKubernetes/serial/StartNoArgs 8.42
286 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.39
287 TestStoppedBinaryUpgrade/Setup 0.75
288 TestStoppedBinaryUpgrade/Upgrade 88.58
289 TestStoppedBinaryUpgrade/MinikubeLogs 1.09
298 TestPause/serial/Start 78.05
299 TestPause/serial/SecondStartNoReconfiguration 58.18
300 TestPause/serial/Pause 0.82
301 TestPause/serial/VerifyStatus 0.43
302 TestPause/serial/Unpause 0.64
303 TestPause/serial/PauseAgain 1.05
304 TestPause/serial/DeletePaused 2.18
305 TestPause/serial/VerifyDeletedResources 14.16
306 TestNetworkPlugins/group/auto/Start 74.73
307 TestNetworkPlugins/group/auto/KubeletFlags 0.29
308 TestNetworkPlugins/group/auto/NetCatPod 10.43
309 TestNetworkPlugins/group/auto/DNS 0.23
310 TestNetworkPlugins/group/auto/Localhost 0.17
311 TestNetworkPlugins/group/auto/HairPin 0.21
312 TestNetworkPlugins/group/kindnet/Start 78.03
313 TestNetworkPlugins/group/calico/Start 71.31
314 TestNetworkPlugins/group/calico/ControllerPod 6.01
315 TestNetworkPlugins/group/kindnet/ControllerPod 6.01
316 TestNetworkPlugins/group/calico/KubeletFlags 0.38
317 TestNetworkPlugins/group/calico/NetCatPod 11.31
318 TestNetworkPlugins/group/kindnet/KubeletFlags 0.43
319 TestNetworkPlugins/group/kindnet/NetCatPod 11.57
320 TestNetworkPlugins/group/calico/DNS 0.21
321 TestNetworkPlugins/group/calico/Localhost 0.16
322 TestNetworkPlugins/group/calico/HairPin 0.19
323 TestNetworkPlugins/group/kindnet/DNS 0.2
324 TestNetworkPlugins/group/kindnet/Localhost 0.19
325 TestNetworkPlugins/group/kindnet/HairPin 0.16
326 TestNetworkPlugins/group/custom-flannel/Start 62.31
327 TestNetworkPlugins/group/false/Start 82.22
328 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.31
329 TestNetworkPlugins/group/custom-flannel/NetCatPod 10.29
330 TestNetworkPlugins/group/custom-flannel/DNS 0.2
331 TestNetworkPlugins/group/custom-flannel/Localhost 0.16
332 TestNetworkPlugins/group/custom-flannel/HairPin 0.15
333 TestNetworkPlugins/group/false/KubeletFlags 0.48
334 TestNetworkPlugins/group/false/NetCatPod 10.38
335 TestNetworkPlugins/group/false/DNS 0.19
336 TestNetworkPlugins/group/false/Localhost 0.22
337 TestNetworkPlugins/group/false/HairPin 0.25
338 TestNetworkPlugins/group/enable-default-cni/Start 83.25
339 TestNetworkPlugins/group/flannel/Start 62.32
340 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.34
341 TestNetworkPlugins/group/enable-default-cni/NetCatPod 10.31
342 TestNetworkPlugins/group/flannel/ControllerPod 6.01
343 TestNetworkPlugins/group/flannel/KubeletFlags 0.29
344 TestNetworkPlugins/group/flannel/NetCatPod 10.3
345 TestNetworkPlugins/group/enable-default-cni/DNS 0.3
346 TestNetworkPlugins/group/enable-default-cni/Localhost 0.26
347 TestNetworkPlugins/group/enable-default-cni/HairPin 0.2
348 TestNetworkPlugins/group/flannel/DNS 0.23
349 TestNetworkPlugins/group/flannel/Localhost 0.33
350 TestNetworkPlugins/group/flannel/HairPin 0.31
351 TestNetworkPlugins/group/bridge/Start 52.83
352 TestNetworkPlugins/group/kubenet/Start 87.03
353 TestNetworkPlugins/group/bridge/KubeletFlags 0.42
354 TestNetworkPlugins/group/bridge/NetCatPod 11.41
355 TestNetworkPlugins/group/bridge/DNS 0.42
356 TestNetworkPlugins/group/bridge/Localhost 0.2
357 TestNetworkPlugins/group/bridge/HairPin 0.17
359 TestStartStop/group/old-k8s-version/serial/FirstStart 89.36
360 TestNetworkPlugins/group/kubenet/KubeletFlags 0.41
361 TestNetworkPlugins/group/kubenet/NetCatPod 12.35
362 TestNetworkPlugins/group/kubenet/DNS 0.25
363 TestNetworkPlugins/group/kubenet/Localhost 0.26
364 TestNetworkPlugins/group/kubenet/HairPin 0.24
366 TestStartStop/group/embed-certs/serial/FirstStart 79.21
367 TestStartStop/group/old-k8s-version/serial/DeployApp 10.51
368 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 1.31
369 TestStartStop/group/old-k8s-version/serial/Stop 10.82
370 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.19
371 TestStartStop/group/old-k8s-version/serial/SecondStart 27.76
372 TestStartStop/group/embed-certs/serial/DeployApp 9.42
373 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.4
374 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 12.01
375 TestStartStop/group/embed-certs/serial/Stop 10.99
376 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.2
377 TestStartStop/group/embed-certs/serial/SecondStart 59.13
378 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.12
379 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.27
380 TestStartStop/group/old-k8s-version/serial/Pause 4.59
382 TestStartStop/group/no-preload/serial/FirstStart 89.13
383 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 6
384 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 5.11
385 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.3
386 TestStartStop/group/embed-certs/serial/Pause 3.15
388 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 78.04
389 TestStartStop/group/no-preload/serial/DeployApp 11.46
390 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.56
391 TestStartStop/group/no-preload/serial/Stop 10.98
392 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.19
393 TestStartStop/group/no-preload/serial/SecondStart 54
394 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 10.39
395 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.11
396 TestStartStop/group/default-k8s-diff-port/serial/Stop 11.07
397 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.19
398 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 56.61
399 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 6
400 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.14
401 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.29
402 TestStartStop/group/no-preload/serial/Pause 4.31
404 TestStartStop/group/newest-cni/serial/FirstStart 35.78
405 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 6
406 TestStartStop/group/newest-cni/serial/DeployApp 0
407 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 1.37
408 TestStartStop/group/newest-cni/serial/Stop 5.79
409 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 5.09
410 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.2
411 TestStartStop/group/newest-cni/serial/SecondStart 19.67
412 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.24
413 TestStartStop/group/default-k8s-diff-port/serial/Pause 3.55
414 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
415 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
416 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.27
417 TestStartStop/group/newest-cni/serial/Pause 2.86
x
+
TestDownloadOnly/v1.28.0/json-events (6.97s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-821429 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-821429 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker  --container-runtime=docker: (6.969404681s)
--- PASS: TestDownloadOnly/v1.28.0/json-events (6.97s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.0/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/preload-exists
I0917 00:21:12.594899  578284 preload.go:131] Checking if preload exists for k8s version v1.28.0 and runtime docker
I0917 00:21:12.594984  578284 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4
--- PASS: TestDownloadOnly/v1.28.0/preload-exists (0.00s)
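The preload-exists check only verifies that the preloaded image tarball fetched by the earlier --download-only run is on disk; no cluster is involved. A sketch of the equivalent manual check, using the cache path printed in the log above:

    # Verify the v1.28.0 docker/arm64 preload tarball is present in the minikube cache.
    ls -lh /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4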

                                                
                                    
x
+
TestDownloadOnly/v1.28.0/LogsDuration (0.09s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-821429
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-821429: exit status 85 (86.114719ms)

                                                
                                                
-- stdout --
	
	==> Audit <==
	┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬─────────┬─────────┬─────────────────────┬──────────┐
	│ COMMAND │                                                                                     ARGS                                                                                      │       PROFILE        │  USER   │ VERSION │     START TIME      │ END TIME │
	├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼─────────┼─────────┼─────────────────────┼──────────┤
	│ start   │ -o=json --download-only -p download-only-821429 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker  --container-runtime=docker │ download-only-821429 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │          │
	└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴─────────┴─────────┴─────────────────────┴──────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:21:05
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:21:05.667820  578289 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:21:05.668021  578289 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:05.668048  578289 out.go:374] Setting ErrFile to fd 2...
	I0917 00:21:05.668069  578289 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:05.668349  578289 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	W0917 00:21:05.668545  578289 root.go:314] Error reading config file at /home/jenkins/minikube-integration/21550-576428/.minikube/config/config.json: open /home/jenkins/minikube-integration/21550-576428/.minikube/config/config.json: no such file or directory
	I0917 00:21:05.669085  578289 out.go:368] Setting JSON to true
	I0917 00:21:05.669961  578289 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11011,"bootTime":1758057455,"procs":153,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:21:05.670053  578289 start.go:140] virtualization:  
	I0917 00:21:05.674046  578289 out.go:99] [download-only-821429] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	W0917 00:21:05.674223  578289 preload.go:293] Failed to list preload files: open /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball: no such file or directory
	I0917 00:21:05.674352  578289 notify.go:220] Checking for updates...
	I0917 00:21:05.677767  578289 out.go:171] MINIKUBE_LOCATION=21550
	I0917 00:21:05.680742  578289 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:21:05.683662  578289 out.go:171] KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:21:05.686531  578289 out.go:171] MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:21:05.689347  578289 out.go:171] MINIKUBE_BIN=out/minikube-linux-arm64
	W0917 00:21:05.694810  578289 out.go:336] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0917 00:21:05.695108  578289 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:21:05.720630  578289 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:21:05.720749  578289 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:05.781731  578289 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:61 SystemTime:2025-09-17 00:21:05.773143127 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:05.781846  578289 docker.go:318] overlay module found
	I0917 00:21:05.784822  578289 out.go:99] Using the docker driver based on user configuration
	I0917 00:21:05.784862  578289 start.go:304] selected driver: docker
	I0917 00:21:05.784874  578289 start.go:918] validating driver "docker" against <nil>
	I0917 00:21:05.784988  578289 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:05.842988  578289 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:61 SystemTime:2025-09-17 00:21:05.834378117 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:05.843150  578289 start_flags.go:327] no existing cluster config was found, will generate one from the flags 
	I0917 00:21:05.843419  578289 start_flags.go:410] Using suggested 3072MB memory alloc based on sys=7834MB, container=7834MB
	I0917 00:21:05.843602  578289 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
	I0917 00:21:05.846691  578289 out.go:171] Using Docker driver with root privileges
	I0917 00:21:05.849624  578289 cni.go:84] Creating CNI manager for ""
	I0917 00:21:05.849703  578289 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:21:05.849717  578289 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0917 00:21:05.849796  578289 start.go:348] cluster config:
	{Name:download-only-821429 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:download-only-821429 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CR
ISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:21:05.852759  578289 out.go:99] Starting "download-only-821429" primary control-plane node in "download-only-821429" cluster
	I0917 00:21:05.852792  578289 cache.go:123] Beginning downloading kic base image for docker with docker
	I0917 00:21:05.855615  578289 out.go:99] Pulling base image v0.0.48 ...
	I0917 00:21:05.855652  578289 preload.go:131] Checking if preload exists for k8s version v1.28.0 and runtime docker
	I0917 00:21:05.855826  578289 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
	I0917 00:21:05.871370  578289 cache.go:152] Downloading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 to local cache
	I0917 00:21:05.872182  578289 image.go:65] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory
	I0917 00:21:05.872298  578289 image.go:150] Writing gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 to local cache
	I0917 00:21:05.922799  578289 preload.go:118] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4
	I0917 00:21:05.922836  578289 cache.go:58] Caching tarball of preloaded images
	I0917 00:21:05.923601  578289 preload.go:131] Checking if preload exists for k8s version v1.28.0 and runtime docker
	I0917 00:21:05.927049  578289 out.go:99] Downloading Kubernetes v1.28.0 preload ...
	I0917 00:21:05.927078  578289 preload.go:236] getting checksum for preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4 ...
	I0917 00:21:06.019635  578289 download.go:108] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4?checksum=md5:002a73d62a3b066a08573cf3da2c8cb4 -> /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4
	I0917 00:21:10.666207  578289 cache.go:155] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 as a tarball
	
	
	* The control-plane node download-only-821429 host does not exist
	  To start a cluster, run: "minikube start -p download-only-821429"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.28.0/LogsDuration (0.09s)
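
The download step above fetches the v1.28.0 preload tarball with an md5 digest appended to the URL (?checksum=md5:002a73d62a3b066a08573cf3da2c8cb4) and then verifies it locally. A minimal Go sketch of that verification, assuming the tarball has already been saved to the current directory (the local path is a placeholder; the expected digest is the one from the URL above):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Digest taken from the ?checksum=md5:... query string in the log above.
	const expected = "002a73d62a3b066a08573cf3da2c8cb4"

	// Placeholder path: assumes the preload tarball sits in the working directory.
	f, err := os.Open("preloaded-images-k8s-v18-v1.28.0-docker-overlay2-arm64.tar.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	got := hex.EncodeToString(h.Sum(nil))
	fmt.Printf("md5=%s match=%v\n", got, got == expected)
}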

                                                
                                    
TestDownloadOnly/v1.28.0/DeleteAll (0.21s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.28.0/DeleteAll (0.21s)

                                                
                                    
TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds (0.14s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-821429
--- PASS: TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds (0.14s)

                                                
                                    
TestDownloadOnly/v1.34.0/json-events (6.56s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-457544 --force --alsologtostderr --kubernetes-version=v1.34.0 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-457544 --force --alsologtostderr --kubernetes-version=v1.34.0 --container-runtime=docker --driver=docker  --container-runtime=docker: (6.557324743s)
--- PASS: TestDownloadOnly/v1.34.0/json-events (6.56s)

                                                
                                    
TestDownloadOnly/v1.34.0/preload-exists (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/preload-exists
I0917 00:21:19.586106  578284 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0917 00:21:19.586144  578284 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
--- PASS: TestDownloadOnly/v1.34.0/preload-exists (0.00s)

                                                
                                    
TestDownloadOnly/v1.34.0/LogsDuration (0.09s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-457544
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-457544: exit status 85 (88.20243ms)

                                                
                                                
-- stdout --
	
	==> Audit <==
	┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│ COMMAND │                                                                                     ARGS                                                                                      │       PROFILE        │  USER   │ VERSION │     START TIME      │      END TIME       │
	├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ start   │ -o=json --download-only -p download-only-821429 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker  --container-runtime=docker │ download-only-821429 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │                     │
	│ delete  │ --all                                                                                                                                                                         │ minikube             │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
	│ delete  │ -p download-only-821429                                                                                                                                                       │ download-only-821429 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
	│ start   │ -o=json --download-only -p download-only-457544 --force --alsologtostderr --kubernetes-version=v1.34.0 --container-runtime=docker --driver=docker  --container-runtime=docker │ download-only-457544 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │                     │
	└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/09/17 00:21:13
	Running on machine: ip-172-31-21-244
	Binary: Built with gc go1.24.6 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0917 00:21:13.072331  578491 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:21:13.072588  578491 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:13.072622  578491 out.go:374] Setting ErrFile to fd 2...
	I0917 00:21:13.072643  578491 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:21:13.072917  578491 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:21:13.073426  578491 out.go:368] Setting JSON to true
	I0917 00:21:13.074295  578491 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11019,"bootTime":1758057455,"procs":148,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:21:13.074393  578491 start.go:140] virtualization:  
	I0917 00:21:13.077681  578491 out.go:99] [download-only-457544] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:21:13.077917  578491 notify.go:220] Checking for updates...
	I0917 00:21:13.080779  578491 out.go:171] MINIKUBE_LOCATION=21550
	I0917 00:21:13.083918  578491 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:21:13.086864  578491 out.go:171] KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:21:13.089759  578491 out.go:171] MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:21:13.092619  578491 out.go:171] MINIKUBE_BIN=out/minikube-linux-arm64
	W0917 00:21:13.098268  578491 out.go:336] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0917 00:21:13.098538  578491 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:21:13.126534  578491 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:21:13.126653  578491 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:13.189106  578491 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:48 SystemTime:2025-09-17 00:21:13.180194092 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:13.189220  578491 docker.go:318] overlay module found
	I0917 00:21:13.192256  578491 out.go:99] Using the docker driver based on user configuration
	I0917 00:21:13.192295  578491 start.go:304] selected driver: docker
	I0917 00:21:13.192304  578491 start.go:918] validating driver "docker" against <nil>
	I0917 00:21:13.192401  578491 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:21:13.257813  578491 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:48 SystemTime:2025-09-17 00:21:13.249190339 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:21:13.257973  578491 start_flags.go:327] no existing cluster config was found, will generate one from the flags 
	I0917 00:21:13.258255  578491 start_flags.go:410] Using suggested 3072MB memory alloc based on sys=7834MB, container=7834MB
	I0917 00:21:13.258408  578491 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
	I0917 00:21:13.261410  578491 out.go:171] Using Docker driver with root privileges
	I0917 00:21:13.264159  578491 cni.go:84] Creating CNI manager for ""
	I0917 00:21:13.264245  578491 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0917 00:21:13.264260  578491 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0917 00:21:13.264358  578491 start.go:348] cluster config:
	{Name:download-only-457544 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:download-only-457544 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CR
ISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:21:13.267296  578491 out.go:99] Starting "download-only-457544" primary control-plane node in "download-only-457544" cluster
	I0917 00:21:13.267318  578491 cache.go:123] Beginning downloading kic base image for docker with docker
	I0917 00:21:13.270226  578491 out.go:99] Pulling base image v0.0.48 ...
	I0917 00:21:13.270254  578491 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:21:13.270309  578491 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
	I0917 00:21:13.291467  578491 cache.go:152] Downloading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 to local cache
	I0917 00:21:13.291628  578491 image.go:65] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory
	I0917 00:21:13.291653  578491 image.go:68] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory, skipping pull
	I0917 00:21:13.291662  578491 image.go:137] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in cache, skipping pull
	I0917 00:21:13.291670  578491 cache.go:155] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 as a tarball
	I0917 00:21:13.322700  578491 preload.go:118] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.34.0/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
	I0917 00:21:13.322725  578491 cache.go:58] Caching tarball of preloaded images
	I0917 00:21:13.323510  578491 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
	I0917 00:21:13.326600  578491 out.go:99] Downloading Kubernetes v1.34.0 preload ...
	I0917 00:21:13.326623  578491 preload.go:236] getting checksum for preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 ...
	I0917 00:21:13.411223  578491 download.go:108] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.34.0/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4?checksum=md5:0b3d43bc03104538fd9d40ba6a11edba -> /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
	I0917 00:21:17.814110  578491 preload.go:247] saving checksum for preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 ...
	I0917 00:21:17.814218  578491 preload.go:254] verifying checksum of /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 ...
	
	
	* The control-plane node download-only-457544 host does not exist
	  To start a cluster, run: "minikube start -p download-only-457544"

                                                
                                                
-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.34.0/LogsDuration (0.09s)

                                                
                                    
TestDownloadOnly/v1.34.0/DeleteAll (0.22s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.34.0/DeleteAll (0.22s)

                                                
                                    
TestDownloadOnly/v1.34.0/DeleteAlwaysSucceeds (0.15s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-457544
--- PASS: TestDownloadOnly/v1.34.0/DeleteAlwaysSucceeds (0.15s)

                                                
                                    
TestBinaryMirror (0.59s)

                                                
                                                
=== RUN   TestBinaryMirror
I0917 00:21:20.882259  578284 binary.go:74] Not caching binary, using https://dl.k8s.io/release/v1.34.0/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.0/bin/linux/arm64/kubectl.sha256
aaa_download_only_test.go:314: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p binary-mirror-287818 --alsologtostderr --binary-mirror http://127.0.0.1:36367 --driver=docker  --container-runtime=docker
helpers_test.go:175: Cleaning up "binary-mirror-287818" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p binary-mirror-287818
--- PASS: TestBinaryMirror (0.59s)
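
The binary-mirror log above resolves kubectl through a checksum=file: URL, i.e. the expected digest is published as a .sha256 sidecar next to the binary rather than inline. A rough sketch of that check, assuming a local copy of the binary named kubectl in the working directory (placeholder path):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"strings"
)

func main() {
	// Sidecar URL taken from the checksum=file:... reference in the log above.
	const sidecar = "https://dl.k8s.io/release/v1.34.0/bin/linux/arm64/kubectl.sha256"

	resp, err := http.Get(sidecar)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	want, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder path: assumes the kubectl binary was already downloaded here.
	f, err := os.Open("kubectl")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	got := hex.EncodeToString(h.Sum(nil))
	fmt.Println("sha256 match:", got == strings.TrimSpace(string(want)))
}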

                                                
                                    
TestOffline (54.39s)

                                                
                                                
=== RUN   TestOffline
=== PAUSE TestOffline

                                                
                                                

                                                
                                                
=== CONT  TestOffline
aab_offline_test.go:55: (dbg) Run:  out/minikube-linux-arm64 start -p offline-docker-271044 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=docker  --container-runtime=docker
aab_offline_test.go:55: (dbg) Done: out/minikube-linux-arm64 start -p offline-docker-271044 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=docker  --container-runtime=docker: (52.233972374s)
helpers_test.go:175: Cleaning up "offline-docker-271044" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p offline-docker-271044
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p offline-docker-271044: (2.156389858s)
--- PASS: TestOffline (54.39s)

                                                
                                    
TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

                                                
                                                
=== RUN   TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster

                                                
                                                

                                                
                                                
=== CONT  TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
addons_test.go:1000: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-235235
addons_test.go:1000: (dbg) Non-zero exit: out/minikube-linux-arm64 addons enable dashboard -p addons-235235: exit status 85 (67.476964ms)

                                                
                                                
-- stdout --
	* Profile "addons-235235" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-235235"

                                                
                                                
-- /stdout --
--- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

                                                
                                    
TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

                                                
                                                
=== RUN   TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster

                                                
                                                

                                                
                                                
=== CONT  TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
addons_test.go:1011: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-235235
addons_test.go:1011: (dbg) Non-zero exit: out/minikube-linux-arm64 addons disable dashboard -p addons-235235: exit status 85 (68.252937ms)

                                                
                                                
-- stdout --
	* Profile "addons-235235" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-235235"

                                                
                                                
-- /stdout --
--- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

                                                
                                    
TestAddons/Setup (151.43s)

                                                
                                                
=== RUN   TestAddons/Setup
addons_test.go:108: (dbg) Run:  out/minikube-linux-arm64 start -p addons-235235 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher
addons_test.go:108: (dbg) Done: out/minikube-linux-arm64 start -p addons-235235 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher: (2m31.424205897s)
--- PASS: TestAddons/Setup (151.43s)
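
The Run/Done lines above are the test harness shelling out to the locally built minikube binary with one --addons flag per addon. A rough sketch of the same kind of invocation through os/exec, with the flag list trimmed to a few addons for brevity (the full set is in the command line above):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Trimmed-down version of the start command shown in the log above.
	cmd := exec.Command("out/minikube-linux-arm64", "start",
		"-p", "addons-235235",
		"--wait=true", "--memory=4096", "--alsologtostderr",
		"--addons=registry", "--addons=metrics-server", "--addons=ingress",
		"--driver=docker", "--container-runtime=docker",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("minikube start failed: %v", err)
	}
}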

                                                
                                    
TestAddons/serial/Volcano (41.14s)

                                                
                                                
=== RUN   TestAddons/serial/Volcano
addons_test.go:876: volcano-admission stabilized in 54.561743ms
addons_test.go:868: volcano-scheduler stabilized in 55.385731ms
addons_test.go:884: volcano-controller stabilized in 55.420102ms
addons_test.go:890: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-scheduler" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-scheduler-799f64f894-fqz7g" [4a4bf987-4283-43b3-9057-3151ccf17076] Running
addons_test.go:890: (dbg) TestAddons/serial/Volcano: app=volcano-scheduler healthy within 6.003955901s
addons_test.go:894: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-admission" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-admission-589c7dd587-4svhd" [39279625-ba4c-4153-92c6-d50b22b5f198] Running
addons_test.go:894: (dbg) TestAddons/serial/Volcano: app=volcano-admission healthy within 5.004019672s
addons_test.go:898: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-controller" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-controllers-7dc6969b45-wnch8" [e042ea55-e3c9-42a8-8dad-4349a19e1173] Running
addons_test.go:898: (dbg) TestAddons/serial/Volcano: app=volcano-controller healthy within 5.003353135s
addons_test.go:903: (dbg) Run:  kubectl --context addons-235235 delete -n volcano-system job volcano-admission-init
addons_test.go:909: (dbg) Run:  kubectl --context addons-235235 create -f testdata/vcjob.yaml
addons_test.go:917: (dbg) Run:  kubectl --context addons-235235 get vcjob -n my-volcano
addons_test.go:935: (dbg) TestAddons/serial/Volcano: waiting 3m0s for pods matching "volcano.sh/job-name=test-job" in namespace "my-volcano" ...
helpers_test.go:352: "test-job-nginx-0" [73b9595e-cbc5-4468-842a-066b2110421c] Pending
helpers_test.go:352: "test-job-nginx-0" [73b9595e-cbc5-4468-842a-066b2110421c] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "test-job-nginx-0" [73b9595e-cbc5-4468-842a-066b2110421c] Running
addons_test.go:935: (dbg) TestAddons/serial/Volcano: volcano.sh/job-name=test-job healthy within 13.00784282s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable volcano --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable volcano --alsologtostderr -v=1: (11.497465068s)
--- PASS: TestAddons/serial/Volcano (41.14s)

                                                
                                    
TestAddons/serial/GCPAuth/Namespaces (0.18s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:630: (dbg) Run:  kubectl --context addons-235235 create ns new-namespace
addons_test.go:644: (dbg) Run:  kubectl --context addons-235235 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.18s)

                                                
                                    
TestAddons/serial/GCPAuth/FakeCredentials (9.99s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/FakeCredentials
addons_test.go:675: (dbg) Run:  kubectl --context addons-235235 create -f testdata/busybox.yaml
addons_test.go:682: (dbg) Run:  kubectl --context addons-235235 create sa gcp-auth-test
addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [936b2c4a-eaf0-408a-9371-703d641f1765] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [936b2c4a-eaf0-408a-9371-703d641f1765] Running
addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: integration-test=busybox healthy within 9.00391734s
addons_test.go:694: (dbg) Run:  kubectl --context addons-235235 exec busybox -- /bin/sh -c "printenv GOOGLE_APPLICATION_CREDENTIALS"
addons_test.go:706: (dbg) Run:  kubectl --context addons-235235 describe sa gcp-auth-test
addons_test.go:720: (dbg) Run:  kubectl --context addons-235235 exec busybox -- /bin/sh -c "cat /google-app-creds.json"
addons_test.go:744: (dbg) Run:  kubectl --context addons-235235 exec busybox -- /bin/sh -c "printenv GOOGLE_CLOUD_PROJECT"
--- PASS: TestAddons/serial/GCPAuth/FakeCredentials (9.99s)
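
The printenv/cat probes above check that the gcp-auth webhook injected fake credentials into the busybox pod. A small sketch of the same checks driven through kubectl; the context and pod names come from this run, the rest is an illustrative assumption:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// run shells out to kubectl and returns trimmed output, failing hard on error.
func run(args ...string) string {
	out, err := exec.Command("kubectl", args...).CombinedOutput()
	if err != nil {
		log.Fatalf("kubectl %v: %v\n%s", args, err, out)
	}
	return strings.TrimSpace(string(out))
}

func main() {
	base := []string{"--context", "addons-235235", "exec", "busybox", "--"}

	// Environment variable the gcp-auth webhook is expected to export inside the pod.
	env := run(append(base, "printenv", "GOOGLE_APPLICATION_CREDENTIALS")...)
	fmt.Println("GOOGLE_APPLICATION_CREDENTIALS =", env)

	// Read the mounted credentials file at that path.
	creds := run(append(base, "cat", env)...)
	fmt.Println("credentials bytes:", len(creds))
}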

                                                
                                    
TestAddons/parallel/Registry (16.53s)

                                                
                                                
=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Registry
addons_test.go:382: registry stabilized in 3.563089ms
addons_test.go:384: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:352: "registry-66898fdd98-wd8hb" [8ab107e4-0115-4366-9bd4-27d43e0f5fde] Running
addons_test.go:384: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.002719392s
addons_test.go:387: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:352: "registry-proxy-w6q4j" [bd100a37-3e81-4ba5-9d5c-654e6cabeefe] Running
addons_test.go:387: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.003808754s
addons_test.go:392: (dbg) Run:  kubectl --context addons-235235 delete po -l run=registry-test --now
addons_test.go:397: (dbg) Run:  kubectl --context addons-235235 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:397: (dbg) Done: kubectl --context addons-235235 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (4.596500284s)
addons_test.go:411: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 ip
2025/09/17 00:25:09 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (16.53s)
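
Once the registry and registry-proxy pods are healthy, the test resolves the node IP and probes the registry directly (the DEBUG GET against 192.168.49.2:5000 above). A minimal sketch of that probe, assuming the same node IP as this run and that port 5000 is reachable from where it is executed:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}

	// Node IP and port taken from the DEBUG line in the log above.
	resp, err := client.Get("http://192.168.49.2:5000/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("registry responded with", resp.Status)
}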

                                                
                                    
TestAddons/parallel/RegistryCreds (0.79s)

                                                
                                                
=== RUN   TestAddons/parallel/RegistryCreds
=== PAUSE TestAddons/parallel/RegistryCreds

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/RegistryCreds
addons_test.go:323: registry-creds stabilized in 3.598732ms
addons_test.go:325: (dbg) Run:  out/minikube-linux-arm64 addons configure registry-creds -f ./testdata/addons_testconfig.json -p addons-235235
addons_test.go:332: (dbg) Run:  kubectl --context addons-235235 -n kube-system get secret -o yaml
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable registry-creds --alsologtostderr -v=1
--- PASS: TestAddons/parallel/RegistryCreds (0.79s)

                                                
                                    
TestAddons/parallel/Ingress (22.41s)

                                                
                                                
=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run:  kubectl --context addons-235235 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run:  kubectl --context addons-235235 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run:  kubectl --context addons-235235 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:352: "nginx" [311b3310-5edc-402f-af4a-13f3b44b6f3a] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "nginx" [311b3310-5edc-402f-af4a-13f3b44b6f3a] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 10.003561642s
I0917 00:25:33.680297  578284 kapi.go:150] Service nginx in namespace default found.
addons_test.go:264: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:288: (dbg) Run:  kubectl --context addons-235235 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 ip
addons_test.go:299: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable ingress-dns --alsologtostderr -v=1: (2.360007984s)
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable ingress --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable ingress --alsologtostderr -v=1: (7.846852059s)
--- PASS: TestAddons/parallel/Ingress (22.41s)
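
The ingress check above runs curl inside the node (via minikube ssh) against 127.0.0.1 with a Host: nginx.example.com header, so the request is routed by the Ingress rule rather than by DNS. An equivalent host-side sketch, assuming the node IP recorded above is reachable on port 80 from where this runs:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	// Node IP from the "minikube ip" output above; hostname from the Ingress rule.
	req, err := http.NewRequest("GET", "http://192.168.49.2/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Host = "nginx.example.com" // overrides the Host header so the rule matches

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("ingress responded with", resp.Status)
}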

                                                
                                    
TestAddons/parallel/InspektorGadget (5.24s)

                                                
                                                
=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:352: "gadget-4bk2n" [1b336809-50f8-4bee-a0ee-adee7e57a0da] Running
addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 5.004201909s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable inspektor-gadget --alsologtostderr -v=1
--- PASS: TestAddons/parallel/InspektorGadget (5.24s)

                                                
                                    
TestAddons/parallel/MetricsServer (5.77s)

                                                
                                                
=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:455: metrics-server stabilized in 3.472236ms
addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:352: "metrics-server-85b7d694d7-wxzn6" [07657805-0866-4a88-9dc0-bde04eb55366] Running
addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.00422619s
addons_test.go:463: (dbg) Run:  kubectl --context addons-235235 top pods -n kube-system
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable metrics-server --alsologtostderr -v=1
--- PASS: TestAddons/parallel/MetricsServer (5.77s)

                                                
                                    
TestAddons/parallel/CSI (61.96s)

                                                
                                                
=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/CSI
I0917 00:25:09.780736  578284 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0917 00:25:09.784512  578284 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0917 00:25:09.784536  578284 kapi.go:107] duration metric: took 6.869225ms to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
addons_test.go:549: csi-hostpath-driver pods stabilized in 6.889639ms
addons_test.go:552: (dbg) Run:  kubectl --context addons-235235 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:557: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:562: (dbg) Run:  kubectl --context addons-235235 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:567: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:352: "task-pv-pod" [b9bed5d4-abdf-4514-bd62-1051c2dfc79a] Pending
helpers_test.go:352: "task-pv-pod" [b9bed5d4-abdf-4514-bd62-1051c2dfc79a] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:352: "task-pv-pod" [b9bed5d4-abdf-4514-bd62-1051c2dfc79a] Running
addons_test.go:567: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 7.003460981s
addons_test.go:572: (dbg) Run:  kubectl --context addons-235235 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:577: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:427: (dbg) Run:  kubectl --context addons-235235 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:427: (dbg) Run:  kubectl --context addons-235235 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:582: (dbg) Run:  kubectl --context addons-235235 delete pod task-pv-pod
addons_test.go:582: (dbg) Done: kubectl --context addons-235235 delete pod task-pv-pod: (1.008288709s)
addons_test.go:588: (dbg) Run:  kubectl --context addons-235235 delete pvc hpvc
addons_test.go:594: (dbg) Run:  kubectl --context addons-235235 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:599: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-235235 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:604: (dbg) Run:  kubectl --context addons-235235 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:609: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:352: "task-pv-pod-restore" [54d435c1-9b64-47a2-b121-995354a77799] Pending
helpers_test.go:352: "task-pv-pod-restore" [54d435c1-9b64-47a2-b121-995354a77799] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:352: "task-pv-pod-restore" [54d435c1-9b64-47a2-b121-995354a77799] Running
addons_test.go:609: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 8.003558335s
addons_test.go:614: (dbg) Run:  kubectl --context addons-235235 delete pod task-pv-pod-restore
addons_test.go:618: (dbg) Run:  kubectl --context addons-235235 delete pvc hpvc-restore
addons_test.go:622: (dbg) Run:  kubectl --context addons-235235 delete volumesnapshot new-snapshot-demo
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable volumesnapshots --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable volumesnapshots --alsologtostderr -v=1: (1.267225644s)
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.99315851s)
--- PASS: TestAddons/parallel/CSI (61.96s)
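
The long runs of helpers_test.go:402 lines above are a poll loop: the helper re-runs kubectl get pvc ... -o jsonpath={.status.phase} until the claim reports Bound or the 6m0s budget runs out. A minimal sketch of that loop for the hpvc claim, with the timeout taken from the test and a 2-second interval assumed:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
	"time"
)

func main() {
	deadline := time.Now().Add(6 * time.Minute) // budget from the "waiting 6m0s" line above

	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "--context", "addons-235235",
			"get", "pvc", "hpvc", "-n", "default",
			"-o", "jsonpath={.status.phase}").Output()
		if err != nil {
			log.Printf("kubectl: %v", err)
		}

		phase := strings.TrimSpace(string(out))
		fmt.Println("phase:", phase)
		if phase == "Bound" {
			return
		}
		time.Sleep(2 * time.Second) // assumed interval; the helper's real cadence is not shown
	}
	log.Fatal("timed out waiting for pvc hpvc to become Bound")
}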

                                                
                                    
TestAddons/parallel/Headlamp (18.8s)

                                                
                                                
=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Headlamp
addons_test.go:808: (dbg) Run:  out/minikube-linux-arm64 addons enable headlamp -p addons-235235 --alsologtostderr -v=1
addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:352: "headlamp-85f8f8dc54-8qpkg" [97b50089-df32-4780-ab7b-ef9beba9ef03] Pending
helpers_test.go:352: "headlamp-85f8f8dc54-8qpkg" [97b50089-df32-4780-ab7b-ef9beba9ef03] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:352: "headlamp-85f8f8dc54-8qpkg" [97b50089-df32-4780-ab7b-ef9beba9ef03] Running
addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 12.003322508s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable headlamp --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable headlamp --alsologtostderr -v=1: (5.814842618s)
--- PASS: TestAddons/parallel/Headlamp (18.80s)

                                                
                                    
TestAddons/parallel/CloudSpanner (6.57s)

                                                
                                                
=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:352: "cloud-spanner-emulator-85f6b7fc65-z9nnc" [61708599-82b6-4e73-b7e5-09ede1c50a9f] Running
addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 6.004054114s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable cloud-spanner --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CloudSpanner (6.57s)

                                                
                                    
TestAddons/parallel/NvidiaDevicePlugin (6.5s)

                                                
                                                
=== RUN   TestAddons/parallel/NvidiaDevicePlugin
=== PAUSE TestAddons/parallel/NvidiaDevicePlugin

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/NvidiaDevicePlugin
addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ...
helpers_test.go:352: "nvidia-device-plugin-daemonset-wwzd9" [62b9d30e-619a-4e29-816c-46c303ee603b] Running
addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 6.003663462s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable nvidia-device-plugin --alsologtostderr -v=1
--- PASS: TestAddons/parallel/NvidiaDevicePlugin (6.50s)

                                                
                                    
TestAddons/parallel/Yakd (10.88s)

                                                
                                                
=== RUN   TestAddons/parallel/Yakd
=== PAUSE TestAddons/parallel/Yakd

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Yakd
addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: waiting 2m0s for pods matching "app.kubernetes.io/name=yakd-dashboard" in namespace "yakd-dashboard" ...
helpers_test.go:352: "yakd-dashboard-5ff678cb9-k7dwg" [f989c25f-236a-4ebb-9ee0-4f6837905586] Running
addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: app.kubernetes.io/name=yakd-dashboard healthy within 5.003411305s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-arm64 -p addons-235235 addons disable yakd --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable yakd --alsologtostderr -v=1: (5.876725423s)
--- PASS: TestAddons/parallel/Yakd (10.88s)

                                                
                                    
TestAddons/StoppedEnableDisable (11.38s)

=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:172: (dbg) Run:  out/minikube-linux-arm64 stop -p addons-235235
addons_test.go:172: (dbg) Done: out/minikube-linux-arm64 stop -p addons-235235: (11.072522908s)
addons_test.go:176: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-235235
addons_test.go:180: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-235235
addons_test.go:185: (dbg) Run:  out/minikube-linux-arm64 addons disable gvisor -p addons-235235
--- PASS: TestAddons/StoppedEnableDisable (11.38s)
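
The sequence above checks that addons can still be enabled and disabled after the node is stopped; a minimal sketch using the same commands, with a hypothetical profile name:

    minikube stop -p demo-profile
    # enable/disable should still succeed against the stopped cluster
    minikube addons enable dashboard -p demo-profile
    minikube addons disable dashboard -p demo-profile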

                                                
                                    
TestCertOptions (41.49s)

=== RUN   TestCertOptions
=== PAUSE TestCertOptions

                                                
                                                

                                                
                                                
=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-arm64 start -p cert-options-248426 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker
cert_options_test.go:49: (dbg) Done: out/minikube-linux-arm64 start -p cert-options-248426 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker: (38.710364449s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-arm64 -p cert-options-248426 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-248426 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-arm64 ssh -p cert-options-248426 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-248426" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-options-248426
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-options-248426: (2.117863537s)
--- PASS: TestCertOptions (41.49s)
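
The cert-options run above starts a cluster with extra API-server SANs and a non-default port, then inspects the generated certificate. A minimal sketch based on the same commands, with a hypothetical profile name:

    minikube start -p certs-demo --apiserver-ips=192.168.15.15 --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=docker
    # the extra IP and DNS name should appear under Subject Alternative Name
    minikube -p certs-demo ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" | grep -A2 "Subject Alternative Name"
    minikube delete -p certs-demo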

                                                
                                    
TestCertExpiration (267.46s)

=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

                                                
                                                

                                                
                                                
=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-467950 --memory=3072 --cert-expiration=3m --driver=docker  --container-runtime=docker
cert_options_test.go:123: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-467950 --memory=3072 --cert-expiration=3m --driver=docker  --container-runtime=docker: (38.604435924s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-467950 --memory=3072 --cert-expiration=8760h --driver=docker  --container-runtime=docker
E0917 01:28:52.993264  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:28:53.251469  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
cert_options_test.go:131: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-467950 --memory=3072 --cert-expiration=8760h --driver=docker  --container-runtime=docker: (46.259688462s)
helpers_test.go:175: Cleaning up "cert-expiration-467950" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-expiration-467950
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-expiration-467950: (2.595733177s)
--- PASS: TestCertExpiration (267.46s)
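
TestCertExpiration first creates a cluster whose certificates expire in 3 minutes, then restarts it with a one-year expiry so the certificates are regenerated; a minimal sketch of the same flow, profile name hypothetical:

    minikube start -p expiry-demo --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=docker
    # after the short-lived certs expire, restarting with a longer expiry regenerates them
    minikube start -p expiry-demo --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=docker
    minikube delete -p expiry-demo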

                                                
                                    
TestDockerFlags (47.16s)

=== RUN   TestDockerFlags
=== PAUSE TestDockerFlags

                                                
                                                

                                                
                                                
=== CONT  TestDockerFlags
docker_test.go:51: (dbg) Run:  out/minikube-linux-arm64 start -p docker-flags-725253 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:51: (dbg) Done: out/minikube-linux-arm64 start -p docker-flags-725253 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (43.853650539s)
docker_test.go:56: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-725253 ssh "sudo systemctl show docker --property=Environment --no-pager"
docker_test.go:67: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-725253 ssh "sudo systemctl show docker --property=ExecStart --no-pager"
helpers_test.go:175: Cleaning up "docker-flags-725253" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-flags-725253
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-flags-725253: (2.588285584s)
--- PASS: TestDockerFlags (47.16s)
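
The docker-flags run passes environment variables and daemon options through to dockerd and reads them back from the systemd unit; a minimal sketch with a hypothetical profile name:

    minikube start -p flags-demo --docker-env=FOO=BAR --docker-opt=debug --docker-opt=icc=true --driver=docker --container-runtime=docker
    # FOO=BAR should appear in the service Environment, --debug/--icc in ExecStart
    minikube -p flags-demo ssh "sudo systemctl show docker --property=Environment --no-pager"
    minikube -p flags-demo ssh "sudo systemctl show docker --property=ExecStart --no-pager"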

                                                
                                    
TestForceSystemdFlag (50.75s)

=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-flag-407535 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:91: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-flag-407535 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (47.932129709s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-flag-407535 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-flag-407535" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-flag-407535
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-flag-407535: (2.371277313s)
--- PASS: TestForceSystemdFlag (50.75s)
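
--force-systemd switches the container runtime's cgroup driver; the check above can be reproduced as follows (profile name hypothetical):

    minikube start -p systemd-demo --force-systemd --driver=docker --container-runtime=docker
    # expected to print "systemd" rather than the default "cgroupfs"
    minikube -p systemd-demo ssh "docker info --format {{.CgroupDriver}}"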

                                                
                                    
TestForceSystemdEnv (38.31s)

=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

                                                
                                                

                                                
                                                
=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-env-573925 --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:155: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-env-573925 --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (35.763232501s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-env-573925 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-env-573925" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-env-573925
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-env-573925: (2.18145249s)
--- PASS: TestForceSystemdEnv (38.31s)

                                                
                                    
TestErrorSpam/setup (33.15s)

=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -p nospam-329896 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-329896 --driver=docker  --container-runtime=docker
error_spam_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -p nospam-329896 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-329896 --driver=docker  --container-runtime=docker: (33.151401971s)
--- PASS: TestErrorSpam/setup (33.15s)

                                                
                                    
TestErrorSpam/start (0.8s)

=== RUN   TestErrorSpam/start
error_spam_test.go:206: Cleaning up 1 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 start --dry-run
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 start --dry-run
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 start --dry-run
--- PASS: TestErrorSpam/start (0.80s)

                                                
                                    
TestErrorSpam/status (1.04s)

=== RUN   TestErrorSpam/status
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 status
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 status
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 status
--- PASS: TestErrorSpam/status (1.04s)

                                                
                                    
TestErrorSpam/pause (1.41s)

=== RUN   TestErrorSpam/pause
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 pause
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 pause
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 pause
--- PASS: TestErrorSpam/pause (1.41s)

                                                
                                    
TestErrorSpam/unpause (1.47s)

=== RUN   TestErrorSpam/unpause
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 unpause
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 unpause
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 unpause
--- PASS: TestErrorSpam/unpause (1.47s)

                                                
                                    
TestErrorSpam/stop (11.01s)

=== RUN   TestErrorSpam/stop
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 stop
error_spam_test.go:149: (dbg) Done: out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 stop: (10.808136217s)
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 stop
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-arm64 -p nospam-329896 --log_dir /tmp/nospam-329896 stop
--- PASS: TestErrorSpam/stop (11.01s)

                                                
                                    
TestFunctional/serial/CopySyncFile (0s)

=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1860: local sync path: /home/jenkins/minikube-integration/21550-576428/.minikube/files/etc/test/nested/copy/578284/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

                                                
                                    
TestFunctional/serial/StartWithProxy (71.93s)

=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2239: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker
E0917 00:33:53.000283  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.007525  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.019064  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.040411  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.081854  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.163333  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.324916  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:53.646593  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:54.288647  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:55.570085  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:33:58.131892  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:34:03.253984  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:2239: (dbg) Done: out/minikube-linux-arm64 start -p functional-918451 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker: (1m11.928535283s)
--- PASS: TestFunctional/serial/StartWithProxy (71.93s)

                                                
                                    
TestFunctional/serial/AuditLog (0s)

=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

                                                
                                    
TestFunctional/serial/SoftStart (56.06s)

=== RUN   TestFunctional/serial/SoftStart
I0917 00:34:06.195322  578284 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
functional_test.go:674: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --alsologtostderr -v=8
E0917 00:34:13.496386  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:34:33.977873  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:674: (dbg) Done: out/minikube-linux-arm64 start -p functional-918451 --alsologtostderr -v=8: (56.052221435s)
functional_test.go:678: soft start took 56.060192252s for "functional-918451" cluster.
I0917 00:35:02.247930  578284 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestFunctional/serial/SoftStart (56.06s)

                                                
                                    
TestFunctional/serial/KubeContext (0.06s)

=== RUN   TestFunctional/serial/KubeContext
functional_test.go:696: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.06s)

                                                
                                    
TestFunctional/serial/KubectlGetPods (0.09s)

=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:711: (dbg) Run:  kubectl --context functional-918451 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.09s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/add_remote (2.91s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1064: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache add registry.k8s.io/pause:3.1
functional_test.go:1064: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache add registry.k8s.io/pause:3.3
functional_test.go:1064: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 cache add registry.k8s.io/pause:3.3: (1.012144389s)
functional_test.go:1064: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache add registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (2.91s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/add_local (1.04s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1092: (dbg) Run:  docker build -t minikube-local-cache-test:functional-918451 /tmp/TestFunctionalserialCacheCmdcacheadd_local4270769072/001
functional_test.go:1104: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache add minikube-local-cache-test:functional-918451
functional_test.go:1109: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache delete minikube-local-cache-test:functional-918451
functional_test.go:1098: (dbg) Run:  docker rmi minikube-local-cache-test:functional-918451
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (1.04s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)

=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1117: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/list (0.05s)

=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1125: (dbg) Run:  out/minikube-linux-arm64 cache list
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.05s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.32s)

=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1139: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh sudo crictl images
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.32s)

                                                
                                    
TestFunctional/serial/CacheCmd/cache/cache_reload (1.68s)

=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1162: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh sudo docker rmi registry.k8s.io/pause:latest
functional_test.go:1168: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh sudo crictl inspecti registry.k8s.io/pause:latest
functional_test.go:1168: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (321.635538ms)

                                                
                                                
-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:1173: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cache reload
functional_test.go:1178: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (1.68s)
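
The cache_reload steps above remove an image from the node and then restore it from minikube's local cache; a minimal sketch with a hypothetical profile name:

    minikube -p cache-demo cache add registry.k8s.io/pause:latest
    minikube -p cache-demo ssh sudo docker rmi registry.k8s.io/pause:latest
    # crictl inspecti now fails; reload pushes the cached image back into the node
    minikube -p cache-demo cache reload
    minikube -p cache-demo ssh sudo crictl inspecti registry.k8s.io/pause:latest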

                                                
                                    
TestFunctional/serial/CacheCmd/cache/delete (0.11s)

=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1187: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1187: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.11s)

                                                
                                    
TestFunctional/serial/MinikubeKubectlCmd (0.13s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:731: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 kubectl -- --context functional-918451 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.13s)

                                                
                                    
TestFunctional/serial/MinikubeKubectlCmdDirectly (0.14s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:756: (dbg) Run:  out/kubectl --context functional-918451 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.14s)

                                                
                                    
TestFunctional/serial/ExtraConfig (52.99s)

=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:772: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E0917 00:35:14.939581  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:772: (dbg) Done: out/minikube-linux-arm64 start -p functional-918451 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (52.991021804s)
functional_test.go:776: restart took 52.991123989s for "functional-918451" cluster.
I0917 00:36:01.831799  578284 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestFunctional/serial/ExtraConfig (52.99s)
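
ExtraConfig restarts the cluster with an additional kube-apiserver flag; a minimal sketch, with a hypothetical profile name and a hypothetical verification step using the standard component label on the static apiserver pod:

    minikube start -p extra-demo --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
    # check (assumption) that the flag landed on the apiserver pod spec
    kubectl -n kube-system get pods -l component=kube-apiserver -o yaml | grep enable-admission-plugins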

                                                
                                    
TestFunctional/serial/ComponentHealth (0.11s)

=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:825: (dbg) Run:  kubectl --context functional-918451 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:840: etcd phase: Running
functional_test.go:850: etcd status: Ready
functional_test.go:840: kube-apiserver phase: Running
functional_test.go:850: kube-apiserver status: Ready
functional_test.go:840: kube-controller-manager phase: Running
functional_test.go:850: kube-controller-manager status: Ready
functional_test.go:840: kube-scheduler phase: Running
functional_test.go:850: kube-scheduler status: Ready
--- PASS: TestFunctional/serial/ComponentHealth (0.11s)

                                                
                                    
TestFunctional/serial/LogsCmd (1.29s)

=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1251: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 logs
functional_test.go:1251: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 logs: (1.293541799s)
--- PASS: TestFunctional/serial/LogsCmd (1.29s)

                                                
                                    
TestFunctional/serial/LogsFileCmd (1.31s)

=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1265: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 logs --file /tmp/TestFunctionalserialLogsFileCmd3634901667/001/logs.txt
functional_test.go:1265: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 logs --file /tmp/TestFunctionalserialLogsFileCmd3634901667/001/logs.txt: (1.308932301s)
--- PASS: TestFunctional/serial/LogsFileCmd (1.31s)

                                                
                                    
TestFunctional/serial/InvalidService (4.57s)

=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2326: (dbg) Run:  kubectl --context functional-918451 apply -f testdata/invalidsvc.yaml
functional_test.go:2340: (dbg) Run:  out/minikube-linux-arm64 service invalid-svc -p functional-918451
functional_test.go:2340: (dbg) Non-zero exit: out/minikube-linux-arm64 service invalid-svc -p functional-918451: exit status 115 (625.779426ms)

                                                
                                                
-- stdout --
	┌───────────┬─────────────┬─────────────┬───────────────────────────┐
	│ NAMESPACE │    NAME     │ TARGET PORT │            URL            │
	├───────────┼─────────────┼─────────────┼───────────────────────────┤
	│ default   │ invalid-svc │ 80          │ http://192.168.49.2:31806 │
	└───────────┴─────────────┴─────────────┴───────────────────────────┘
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_96b204199e3191fa1740d4430b018a3c8028d52d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:2332: (dbg) Run:  kubectl --context functional-918451 delete -f testdata/invalidsvc.yaml
--- PASS: TestFunctional/serial/InvalidService (4.57s)
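
The InvalidService case applies a Service whose selector matches no running pod and confirms that `minikube service` fails with SVC_UNREACHABLE (exit status 115 above); a minimal sketch, assuming a manifest equivalent to the test's invalidsvc.yaml and a hypothetical profile name:

    kubectl apply -f testdata/invalidsvc.yaml
    minikube service invalid-svc -p demo-profile; echo "exit: $?"   # expected: 115
    kubectl delete -f testdata/invalidsvc.yaml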

                                                
                                    
TestFunctional/parallel/ConfigCmd (0.48s)

=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config unset cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config get cpus
functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 config get cpus: exit status 14 (75.417924ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config set cpus 2
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config get cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config unset cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 config get cpus
functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 config get cpus: exit status 14 (61.806473ms)

                                                
                                                
** stderr ** 
	Error: specified key could not be found in config

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.48s)
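
ConfigCmd exercises the local config store; as in the runs above, `config get` on an unset key exits with status 14. A minimal sketch:

    minikube config set cpus 2
    minikube config get cpus            # prints 2
    minikube config unset cpus
    minikube config get cpus; echo $?   # "specified key could not be found in config", exit 14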

                                                
                                    
TestFunctional/parallel/DryRun (0.44s)

=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/DryRun
functional_test.go:989: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:989: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-918451 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (195.491212ms)

                                                
                                                
-- stdout --
	* [functional-918451] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=21550
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0917 00:46:30.158709  626918 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:46:30.158917  626918 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:30.158943  626918 out.go:374] Setting ErrFile to fd 2...
	I0917 00:46:30.158965  626918 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:30.159270  626918 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:46:30.159714  626918 out.go:368] Setting JSON to false
	I0917 00:46:30.160837  626918 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":12536,"bootTime":1758057455,"procs":192,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:46:30.160938  626918 start.go:140] virtualization:  
	I0917 00:46:30.164574  626918 out.go:179] * [functional-918451] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	I0917 00:46:30.167575  626918 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:46:30.167676  626918 notify.go:220] Checking for updates...
	I0917 00:46:30.173431  626918 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:46:30.176230  626918 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:46:30.179286  626918 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:46:30.183356  626918 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:46:30.187032  626918 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:46:30.190448  626918 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:46:30.191047  626918 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:46:30.214965  626918 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:46:30.215091  626918 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:46:30.281437  626918 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-09-17 00:46:30.271290181 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:46:30.281554  626918 docker.go:318] overlay module found
	I0917 00:46:30.284656  626918 out.go:179] * Using the docker driver based on existing profile
	I0917 00:46:30.287389  626918 start.go:304] selected driver: docker
	I0917 00:46:30.287409  626918 start.go:918] validating driver "docker" against &{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p
MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:46:30.287511  626918 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:46:30.290990  626918 out.go:203] 
	W0917 00:46:30.293877  626918 out.go:285] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I0917 00:46:30.296697  626918 out.go:203] 

                                                
                                                
** /stderr **
functional_test.go:1006: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --dry-run --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
--- PASS: TestFunctional/parallel/DryRun (0.44s)
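
The dry-run check validates flags without creating anything; requesting less than the usable minimum of 1800MB makes minikube exit with status 23 (RSRC_INSUFFICIENT_REQ_MEMORY), as captured above. A minimal sketch, profile name hypothetical:

    minikube start -p demo-profile --dry-run --memory 250MB --driver=docker --container-runtime=docker; echo "exit: $?"   # expected: 23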

                                                
                                    
TestFunctional/parallel/InternationalLanguage (0.21s)

=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1035: (dbg) Run:  out/minikube-linux-arm64 start -p functional-918451 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:1035: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-918451 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (207.638946ms)

                                                
                                                
-- stdout --
	* [functional-918451] minikube v1.37.0 sur Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=21550
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote docker basé sur le profil existant
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0917 00:46:43.249961  628774 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:46:43.250084  628774 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:43.250103  628774 out.go:374] Setting ErrFile to fd 2...
	I0917 00:46:43.250108  628774 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:46:43.251102  628774 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:46:43.251493  628774 out.go:368] Setting JSON to false
	I0917 00:46:43.252476  628774 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":12549,"bootTime":1758057455,"procs":198,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
	I0917 00:46:43.252543  628774 start.go:140] virtualization:  
	I0917 00:46:43.256110  628774 out.go:179] * [functional-918451] minikube v1.37.0 sur Ubuntu 20.04 (arm64)
	I0917 00:46:43.259038  628774 out.go:179]   - MINIKUBE_LOCATION=21550
	I0917 00:46:43.259156  628774 notify.go:220] Checking for updates...
	I0917 00:46:43.264582  628774 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0917 00:46:43.267389  628774 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	I0917 00:46:43.270251  628774 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	I0917 00:46:43.273089  628774 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0917 00:46:43.275977  628774 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I0917 00:46:43.279222  628774 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:46:43.279769  628774 driver.go:421] Setting default libvirt URI to qemu:///system
	I0917 00:46:43.309959  628774 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
	I0917 00:46:43.310112  628774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:46:43.378351  628774 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-09-17 00:46:43.368740075 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:46:43.378547  628774 docker.go:318] overlay module found
	I0917 00:46:43.382058  628774 out.go:179] * Utilisation du pilote docker basé sur le profil existant
	I0917 00:46:43.384892  628774 start.go:304] selected driver: docker
	I0917 00:46:43.384911  628774 start.go:918] validating driver "docker" against &{Name:functional-918451 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:functional-918451 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p
MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0917 00:46:43.385020  628774 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0917 00:46:43.388416  628774 out.go:203] 
	W0917 00:46:43.391369  628774 out.go:285] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I0917 00:46:43.394193  628774 out.go:203] 

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.21s)

                                                
                                    
TestFunctional/parallel/StatusCmd (1.09s)

=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:869: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 status
functional_test.go:875: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:887: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (1.09s)
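
StatusCmd formats `minikube status` output with a Go template and as JSON; a minimal sketch using the same template keys as the run above (profile name hypothetical):

    minikube -p demo-profile status -f 'host:{{.Host}},kubelet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}'
    minikube -p demo-profile status -o json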

                                                
                                    
TestFunctional/parallel/AddonsCmd (0.14s)

=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1695: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 addons list
functional_test.go:1707: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.14s)

                                                
                                    
TestFunctional/parallel/SSHCmd (0.81s)

=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1730: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "echo hello"
functional_test.go:1747: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.81s)

                                                
                                    
TestFunctional/parallel/CpCmd (1.98s)

                                                
                                                
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh -n functional-918451 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cp functional-918451:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd2379163934/001/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh -n functional-918451 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 cp testdata/cp-test.txt /tmp/does/not/exist/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh -n functional-918451 "sudo cat /tmp/does/not/exist/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (1.98s)

                                                
                                    
TestFunctional/parallel/FileSync (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1934: Checking for existence of /etc/test/nested/copy/578284/hosts within VM
functional_test.go:1936: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /etc/test/nested/copy/578284/hosts"
functional_test.go:1941: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.41s)

                                                
                                    
TestFunctional/parallel/CertSync (2.18s)

                                                
                                                
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1977: Checking for existence of /etc/ssl/certs/578284.pem within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /etc/ssl/certs/578284.pem"
functional_test.go:1977: Checking for existence of /usr/share/ca-certificates/578284.pem within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /usr/share/ca-certificates/578284.pem"
functional_test.go:1977: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:2004: Checking for existence of /etc/ssl/certs/5782842.pem within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /etc/ssl/certs/5782842.pem"
functional_test.go:2004: Checking for existence of /usr/share/ca-certificates/5782842.pem within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /usr/share/ca-certificates/5782842.pem"
functional_test.go:2004: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (2.18s)

                                                
                                    
TestFunctional/parallel/NodeLabels (0.13s)

                                                
                                                
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:234: (dbg) Run:  kubectl --context functional-918451 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.13s)
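The kubectl --template above ranges over the first node's label map. A small self-contained sketch of the same Go-template construct, run against made-up data of the same shape (the label keys and values are sample data, not the cluster's):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Shape mirrors what kubectl feeds the template: a list of items, each
	// carrying metadata.labels.
	data := map[string]interface{}{
		"items": []map[string]interface{}{
			{"metadata": map[string]interface{}{
				"labels": map[string]string{
					"kubernetes.io/arch": "arm64",
					"kubernetes.io/os":   "linux",
				},
			}},
		},
	}
	tmpl := template.Must(template.New("labels").Parse(
		`{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}`))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}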

                                                
                                    
TestFunctional/parallel/NonActiveRuntimeDisabled (0.35s)

                                                
                                                
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2032: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo systemctl is-active crio"
functional_test.go:2032: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh "sudo systemctl is-active crio": exit status 1 (348.678923ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.35s)
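The check above treats a non-zero exit from "systemctl is-active crio" (status 3, stdout "inactive") as the expected result on a Docker-runtime cluster. A rough Go sketch of distinguishing "inactive" from a genuine failure, run locally rather than over ssh as the test does:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// systemctl is-active exits 0 when the unit is active and non-zero
	// (typically 3) when it is not; both cases still print a status word.
	cmd := exec.Command("systemctl", "is-active", "crio")
	out, err := cmd.CombinedOutput()
	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Printf("crio is active: %s", out)
	case errors.As(err, &exitErr):
		fmt.Printf("crio is not active (exit %d): %s", exitErr.ExitCode(), out)
	default:
		fmt.Println("could not run systemctl:", err)
	}
}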

                                                
                                    
TestFunctional/parallel/License (0.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/License
functional_test.go:2293: (dbg) Run:  out/minikube-linux-arm64 license
--- PASS: TestFunctional/parallel/License (0.41s)

                                                
                                    
TestFunctional/parallel/Version/short (0.05s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2261: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 version --short
--- PASS: TestFunctional/parallel/Version/short (0.05s)

                                                
                                    
TestFunctional/parallel/Version/components (1.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2275: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 version -o=json --components
functional_test.go:2275: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 version -o=json --components: (1.012980058s)
--- PASS: TestFunctional/parallel/Version/components (1.01s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListShort (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:276: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls --format short --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-arm64 -p functional-918451 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.10.1
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
docker.io/library/nginx:alpine
docker.io/library/minikube-local-cache-test:functional-918451
docker.io/kicbase/echo-server:functional-918451
functional_test.go:284: (dbg) Stderr: out/minikube-linux-arm64 -p functional-918451 image ls --format short --alsologtostderr:
I0917 00:50:35.011944  630923 out.go:360] Setting OutFile to fd 1 ...
I0917 00:50:35.012245  630923 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.012285  630923 out.go:374] Setting ErrFile to fd 2...
I0917 00:50:35.012306  630923 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.012666  630923 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:50:35.013420  630923 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.013628  630923 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.014162  630923 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:50:35.033230  630923 ssh_runner.go:195] Run: systemctl --version
I0917 00:50:35.033293  630923 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:50:35.051238  630923 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:50:35.145079  630923 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.21s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListTable (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:276: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls --format table --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-arm64 -p functional-918451 image ls --format table --alsologtostderr:
┌─────────────────────────────────────────────┬───────────────────┬───────────────┬────────┐
│                    IMAGE                    │        TAG        │   IMAGE ID    │  SIZE  │
├─────────────────────────────────────────────┼───────────────────┼───────────────┼────────┤
│ registry.k8s.io/etcd                        │ 3.6.4-0           │ a1894772a478e │ 205MB  │
│ registry.k8s.io/pause                       │ 3.10.1            │ d7b100cd9a77b │ 514kB  │
│ registry.k8s.io/pause                       │ 3.1               │ 8057e0500773a │ 525kB  │
│ registry.k8s.io/kube-apiserver              │ v1.34.0           │ d291939e99406 │ 83.7MB │
│ registry.k8s.io/kube-controller-manager     │ v1.34.0           │ 996be7e86d9b3 │ 71.5MB │
│ registry.k8s.io/kube-proxy                  │ v1.34.0           │ 6fc32d66c1411 │ 74.7MB │
│ registry.k8s.io/pause                       │ 3.3               │ 3d18732f8686c │ 484kB  │
│ localhost/my-image                          │ functional-918451 │ 671f4b502becc │ 1.41MB │
│ docker.io/library/minikube-local-cache-test │ functional-918451 │ abe02c9a36f30 │ 30B    │
│ registry.k8s.io/kube-scheduler              │ v1.34.0           │ a25f5ef9c34c3 │ 50.5MB │
│ docker.io/library/nginx                     │ alpine            │ 35f3cbee4fb77 │ 52.9MB │
│ registry.k8s.io/coredns/coredns             │ v1.12.1           │ 138784d87c9c5 │ 72.1MB │
│ docker.io/kicbase/echo-server               │ functional-918451 │ ce2d2cda2d858 │ 4.78MB │
│ registry.k8s.io/pause                       │ latest            │ 8cb2091f603e7 │ 240kB  │
│ gcr.io/k8s-minikube/storage-provisioner     │ v5                │ ba04bb24b9575 │ 29MB   │
│ gcr.io/k8s-minikube/busybox                 │ 1.28.4-glibc      │ 1611cd07b61d5 │ 3.55MB │
└─────────────────────────────────────────────┴───────────────────┴───────────────┴────────┘
functional_test.go:284: (dbg) Stderr: out/minikube-linux-arm64 -p functional-918451 image ls --format table --alsologtostderr:
I0917 00:50:39.163487  631260 out.go:360] Setting OutFile to fd 1 ...
I0917 00:50:39.163694  631260 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:39.163720  631260 out.go:374] Setting ErrFile to fd 2...
I0917 00:50:39.163740  631260 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:39.164026  631260 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:50:39.164741  631260 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:39.164905  631260 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:39.165422  631260 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:50:39.184651  631260 ssh_runner.go:195] Run: systemctl --version
I0917 00:50:39.184709  631260 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:50:39.201571  631260 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:50:39.297062  631260 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.21s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:276: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls --format json --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-arm64 -p functional-918451 image ls --format json --alsologtostderr:
[{"id":"35f3cbee4fb77c3efb39f2723a21ce181906139442a37de8ffc52d89641d9936","repoDigests":[],"repoTags":["docker.io/library/nginx:alpine"],"size":"52900000"},{"id":"ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17","repoDigests":[],"repoTags":["docker.io/kicbase/echo-server:functional-918451"],"size":"4780000"},{"id":"abe02c9a36f308344856ee7b44bea0be7802b73bc7288526da246fe1029bb61b","repoDigests":[],"repoTags":["docker.io/library/minikube-local-cache-test:functional-918451"],"size":"30"},{"id":"6fc32d66c141152245438e6512df788cb52d64a1617e33561950b0e7a4675abf","repoDigests":[],"repoTags":["registry.k8s.io/kube-proxy:v1.34.0"],"size":"74700000"},{"id":"a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e","repoDigests":[],"repoTags":["registry.k8s.io/etcd:3.6.4-0"],"size":"205000000"},{"id":"ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"29000000"},{"id":"d291939e99406491148421
5449d0ab96c535b02adc4fc5d0ad4e438cf71465be","repoDigests":[],"repoTags":["registry.k8s.io/kube-apiserver:v1.34.0"],"size":"83700000"},{"id":"996be7e86d9b3a549d718de63713d9fea9db1f45ac44863a6770292d7b463570","repoDigests":[],"repoTags":["registry.k8s.io/kube-controller-manager:v1.34.0"],"size":"71500000"},{"id":"d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.10.1"],"size":"514000"},{"id":"138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc","repoDigests":[],"repoTags":["registry.k8s.io/coredns/coredns:v1.12.1"],"size":"72100000"},{"id":"1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"3550000"},{"id":"8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"525000"},{"id":"3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300","repoDigests":[],"
repoTags":["registry.k8s.io/pause:3.3"],"size":"484000"},{"id":"8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"size":"240000"},{"id":"a25f5ef9c34c37c649f3b4f9631a169221ac2d6f41d9767c7588cd355f76f9ee","repoDigests":[],"repoTags":["registry.k8s.io/kube-scheduler:v1.34.0"],"size":"50500000"}]
functional_test.go:284: (dbg) Stderr: out/minikube-linux-arm64 -p functional-918451 image ls --format json --alsologtostderr:
I0917 00:50:35.219623  630955 out.go:360] Setting OutFile to fd 1 ...
I0917 00:50:35.219784  630955 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.219809  630955 out.go:374] Setting ErrFile to fd 2...
I0917 00:50:35.219828  630955 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.220085  630955 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:50:35.220770  630955 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.220943  630955 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.221416  630955 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:50:35.239242  630955 ssh_runner.go:195] Run: systemctl --version
I0917 00:50:35.239292  630955 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:50:35.260575  630955 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:50:35.357093  630955 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.21s)
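The JSON printed above is an array of objects with id, repoDigests, repoTags and size fields. A minimal Go sketch that decodes that output, assuming the same binary path and profile name shown in the log:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// image matches the fields visible in the JSON output above; size is a string
// of bytes in that output.
type image struct {
	ID          string   `json:"id"`
	RepoDigests []string `json:"repoDigests"`
	RepoTags    []string `json:"repoTags"`
	Size        string   `json:"size"`
}

func main() {
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "functional-918451",
		"image", "ls", "--format", "json").Output()
	if err != nil {
		panic(err)
	}
	var images []image
	if err := json.Unmarshal(out, &images); err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Printf("%-20.20s %v\n", img.ID, img.RepoTags)
	}
}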

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:276: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls --format yaml --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-arm64 -p functional-918451 image ls --format yaml --alsologtostderr:
- id: 1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "3550000"
- id: 3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.3
size: "484000"
- id: 671f4b502becc8dd0e25aab0c3b9ea814b6bc43fe59a3c8f4f5ac433e5b800a6
repoDigests: []
repoTags:
- localhost/my-image:functional-918451
size: "1410000"
- id: abe02c9a36f308344856ee7b44bea0be7802b73bc7288526da246fe1029bb61b
repoDigests: []
repoTags:
- docker.io/library/minikube-local-cache-test:functional-918451
size: "30"
- id: d291939e994064911484215449d0ab96c535b02adc4fc5d0ad4e438cf71465be
repoDigests: []
repoTags:
- registry.k8s.io/kube-apiserver:v1.34.0
size: "83700000"
- id: 6fc32d66c141152245438e6512df788cb52d64a1617e33561950b0e7a4675abf
repoDigests: []
repoTags:
- registry.k8s.io/kube-proxy:v1.34.0
size: "74700000"
- id: a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e
repoDigests: []
repoTags:
- registry.k8s.io/etcd:3.6.4-0
size: "205000000"
- id: ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "29000000"
- id: 8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a
repoDigests: []
repoTags:
- registry.k8s.io/pause:latest
size: "240000"
- id: 35f3cbee4fb77c3efb39f2723a21ce181906139442a37de8ffc52d89641d9936
repoDigests: []
repoTags:
- docker.io/library/nginx:alpine
size: "52900000"
- id: 138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc
repoDigests: []
repoTags:
- registry.k8s.io/coredns/coredns:v1.12.1
size: "72100000"
- id: 8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.1
size: "525000"
- id: a25f5ef9c34c37c649f3b4f9631a169221ac2d6f41d9767c7588cd355f76f9ee
repoDigests: []
repoTags:
- registry.k8s.io/kube-scheduler:v1.34.0
size: "50500000"
- id: 996be7e86d9b3a549d718de63713d9fea9db1f45ac44863a6770292d7b463570
repoDigests: []
repoTags:
- registry.k8s.io/kube-controller-manager:v1.34.0
size: "71500000"
- id: d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.10.1
size: "514000"
- id: ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17
repoDigests: []
repoTags:
- docker.io/kicbase/echo-server:functional-918451
size: "4780000"

                                                
                                                
functional_test.go:284: (dbg) Stderr: out/minikube-linux-arm64 -p functional-918451 image ls --format yaml --alsologtostderr:
I0917 00:50:38.941124  631222 out.go:360] Setting OutFile to fd 1 ...
I0917 00:50:38.941261  631222 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:38.941273  631222 out.go:374] Setting ErrFile to fd 2...
I0917 00:50:38.941278  631222 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:38.941517  631222 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:50:38.942126  631222 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:38.942248  631222 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:38.942688  631222 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:50:38.960943  631222 ssh_runner.go:195] Run: systemctl --version
I0917 00:50:38.960996  631222 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:50:38.979491  631222 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:50:39.076905  631222 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.22s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (3.51s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:323: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh pgrep buildkitd
functional_test.go:323: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh pgrep buildkitd: exit status 1 (273.765444ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:330: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image build -t localhost/my-image:functional-918451 testdata/build --alsologtostderr
functional_test.go:330: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 image build -t localhost/my-image:functional-918451 testdata/build --alsologtostderr: (3.015983653s)
functional_test.go:338: (dbg) Stderr: out/minikube-linux-arm64 -p functional-918451 image build -t localhost/my-image:functional-918451 testdata/build --alsologtostderr:
I0917 00:50:35.705683  631059 out.go:360] Setting OutFile to fd 1 ...
I0917 00:50:35.706427  631059 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.706463  631059 out.go:374] Setting ErrFile to fd 2...
I0917 00:50:35.706482  631059 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:50:35.706787  631059 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:50:35.707456  631059 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.709761  631059 config.go:182] Loaded profile config "functional-918451": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:50:35.710333  631059 cli_runner.go:164] Run: docker container inspect functional-918451 --format={{.State.Status}}
I0917 00:50:35.727548  631059 ssh_runner.go:195] Run: systemctl --version
I0917 00:50:35.727602  631059 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-918451
I0917 00:50:35.746077  631059 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33515 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/functional-918451/id_rsa Username:docker}
I0917 00:50:35.841052  631059 build_images.go:161] Building image from path: /tmp/build.3171442529.tar
I0917 00:50:35.841127  631059 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I0917 00:50:35.851044  631059 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.3171442529.tar
I0917 00:50:35.854319  631059 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.3171442529.tar: stat -c "%s %y" /var/lib/minikube/build/build.3171442529.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.3171442529.tar': No such file or directory
I0917 00:50:35.854349  631059 ssh_runner.go:362] scp /tmp/build.3171442529.tar --> /var/lib/minikube/build/build.3171442529.tar (3072 bytes)
I0917 00:50:35.879634  631059 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.3171442529
I0917 00:50:35.888391  631059 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.3171442529 -xf /var/lib/minikube/build/build.3171442529.tar
I0917 00:50:35.897578  631059 docker.go:361] Building image: /var/lib/minikube/build/build.3171442529
I0917 00:50:35.897665  631059 ssh_runner.go:195] Run: docker build -t localhost/my-image:functional-918451 /var/lib/minikube/build/build.3171442529
#0 building with "default" instance using docker driver

                                                
                                                
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 97B done
#1 DONE 0.0s

                                                
                                                
#2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest
#2 DONE 1.4s

                                                
                                                
#3 [internal] load .dockerignore
#3 transferring context: 2B done
#3 DONE 0.0s

                                                
                                                
#4 [internal] load build context
#4 transferring context: 62B done
#4 DONE 0.0s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 0.0s done
#5 sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 770B / 770B done
#5 sha256:a77fe109c026308f149d36484d795b42efe0fd29b332be9071f63e1634c36ac9 527B / 527B done
#5 sha256:71a676dd070f4b701c3272e566d84951362f1326ea07d5bbad119d1c4f6b3d02 1.47kB / 1.47kB done
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0B / 828.50kB 0.1s
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 828.50kB / 828.50kB 0.5s done
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0.0s done
#5 DONE 0.6s

                                                
                                                
#6 [2/3] RUN true
#6 DONE 0.2s

                                                
                                                
#7 [3/3] ADD content.txt /
#7 DONE 0.1s

                                                
                                                
#8 exporting to image
#8 exporting layers 0.1s done
#8 writing image sha256:671f4b502becc8dd0e25aab0c3b9ea814b6bc43fe59a3c8f4f5ac433e5b800a6 done
#8 naming to localhost/my-image:functional-918451 done
#8 DONE 0.1s
I0917 00:50:38.647433  631059 ssh_runner.go:235] Completed: docker build -t localhost/my-image:functional-918451 /var/lib/minikube/build/build.3171442529: (2.74973853s)
I0917 00:50:38.647502  631059 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.3171442529
I0917 00:50:38.656960  631059 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.3171442529.tar
I0917 00:50:38.668126  631059 build_images.go:217] Built localhost/my-image:functional-918451 from /tmp/build.3171442529.tar
I0917 00:50:38.668156  631059 build_images.go:133] succeeded building to: functional-918451
I0917 00:50:38.668162  631059 build_images.go:134] failed building to: 
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (3.51s)
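The stderr above shows the build flow: the local testdata/build context is packed into a tar, copied into the node under /var/lib/minikube/build, unpacked, and built with docker build. A rough local-only Go sketch of that tar-then-build sequence; the /tmp paths are placeholders, the image tag is taken from the log, and this is not minikube's implementation:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func run(name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, name, "failed:", err)
		os.Exit(1)
	}
}

func main() {
	// 1. Package the build context, as minikube does before copying it to the node.
	run("tar", "-cf", "/tmp/build.tar", "-C", "testdata/build", ".")
	// 2. Unpack it where the build will run (the node uses /var/lib/minikube/build/...).
	run("mkdir", "-p", "/tmp/build-ctx")
	run("tar", "-xf", "/tmp/build.tar", "-C", "/tmp/build-ctx")
	// 3. Build the image from the unpacked context, mirroring the docker build step in the log.
	run("docker", "build", "-t", "localhost/my-image:functional-918451", "/tmp/build-ctx")
}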

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (0.69s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:357: (dbg) Run:  docker pull kicbase/echo-server:1.0
functional_test.go:362: (dbg) Run:  docker tag kicbase/echo-server:1.0 kicbase/echo-server:functional-918451
--- PASS: TestFunctional/parallel/ImageCommands/Setup (0.69s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (1.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:370: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (1.20s)

                                                
                                    
TestFunctional/parallel/DockerEnv/bash (1.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/DockerEnv/bash
functional_test.go:514: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-918451 docker-env) && out/minikube-linux-arm64 status -p functional-918451"
functional_test.go:537: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-918451 docker-env) && docker images"
--- PASS: TestFunctional/parallel/DockerEnv/bash (1.23s)
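The bash test evaluates "minikube docker-env", which prints export lines for variables such as DOCKER_TLS_VERIFY, DOCKER_HOST, DOCKER_CERT_PATH and MINIKUBE_ACTIVE_DOCKERD, and then runs docker against the cluster's daemon. A hedged Go equivalent that sets placeholder values for those variables before invoking docker (the values below are illustrative, not taken from this run):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Placeholder values; in practice they come from
	// `out/minikube-linux-arm64 -p functional-918451 docker-env`.
	env := map[string]string{
		"DOCKER_TLS_VERIFY":       "1",
		"DOCKER_HOST":             "tcp://127.0.0.1:32768",
		"DOCKER_CERT_PATH":        os.Getenv("HOME") + "/.minikube/certs",
		"MINIKUBE_ACTIVE_DOCKERD": "functional-918451",
	}
	cmd := exec.Command("docker", "images")
	// os/exec keeps the last value for duplicate keys, so these appends override
	// any existing settings from the parent environment.
	cmd.Env = os.Environ()
	for k, v := range env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}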

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.93s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:380: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.93s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_changes (0.17s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2124: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.17s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.14s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2124: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.14s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.15s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2124: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.15s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.25s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:250: (dbg) Run:  docker pull kicbase/echo-server:latest
functional_test.go:255: (dbg) Run:  docker tag kicbase/echo-server:latest kicbase/echo-server:functional-918451
functional_test.go:260: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image load --daemon kicbase/echo-server:functional-918451 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.25s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.46s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:395: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image save kicbase/echo-server:functional-918451 /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.46s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.54s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:407: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image rm kicbase/echo-server:functional-918451 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.54s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_not_create (0.52s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1285: (dbg) Run:  out/minikube-linux-arm64 profile lis
functional_test.go:1290: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.52s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.78s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:424: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.78s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_list (0.54s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1325: (dbg) Run:  out/minikube-linux-arm64 profile list
functional_test.go:1330: Took "469.245435ms" to run "out/minikube-linux-arm64 profile list"
functional_test.go:1339: (dbg) Run:  out/minikube-linux-arm64 profile list -l
functional_test.go:1344: Took "66.883453ms" to run "out/minikube-linux-arm64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.54s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_json_output (0.52s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1376: (dbg) Run:  out/minikube-linux-arm64 profile list -o json
functional_test.go:1381: Took "402.113628ms" to run "out/minikube-linux-arm64 profile list -o json"
functional_test.go:1389: (dbg) Run:  out/minikube-linux-arm64 profile list -o json --light
functional_test.go:1394: Took "113.727146ms" to run "out/minikube-linux-arm64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.52s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.44s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:434: (dbg) Run:  docker rmi kicbase/echo-server:functional-918451
functional_test.go:439: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 image save --daemon kicbase/echo-server:functional-918451 --alsologtostderr
functional_test.go:447: (dbg) Run:  docker image inspect kicbase/echo-server:functional-918451
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.44s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.62s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr]
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr]
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr] ...
helpers_test.go:525: unable to kill pid 623172: os: process already finished
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.62s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr]
--- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (9.35s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup
functional_test_tunnel_test.go:212: (dbg) Run:  kubectl --context functional-918451 apply -f testdata/testsvc.yaml
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ...
helpers_test.go:352: "nginx-svc" [b1fb2858-6e5b-4edc-8fd0-227613d5ebd8] Pending
helpers_test.go:352: "nginx-svc" [b1fb2858-6e5b-4edc-8fd0-227613d5ebd8] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "nginx-svc" [b1fb2858-6e5b-4edc-8fd0-227613d5ebd8] Running
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: run=nginx-svc healthy within 9.003372896s
I0917 00:36:26.604823  578284 kapi.go:150] Service nginx-svc in namespace default found.
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (9.35s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.09s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP
functional_test_tunnel_test.go:234: (dbg) Run:  kubectl --context functional-918451 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip}
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.09s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:299: tunnel at http://10.111.232.235 is working!
--- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-arm64 -p functional-918451 tunnel --alsologtostderr] ...
functional_test_tunnel_test.go:437: failed to stop process: signal: terminated
--- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                    
TestFunctional/parallel/MountCmd/any-port (7.64s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdany-port2559124128/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1758069990555858036" to /tmp/TestFunctionalparallelMountCmdany-port2559124128/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1758069990555858036" to /tmp/TestFunctionalparallelMountCmdany-port2559124128/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1758069990555858036" to /tmp/TestFunctionalparallelMountCmdany-port2559124128/001/test-1758069990555858036
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (354.573738ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I0917 00:46:30.911621  578284 retry.go:31] will retry after 303.370972ms: exit status 1
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Sep 17 00:46 created-by-test
-rw-r--r-- 1 docker docker 24 Sep 17 00:46 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Sep 17 00:46 test-1758069990555858036
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh cat /mount-9p/test-1758069990555858036
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-918451 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:352: "busybox-mount" [52129871-8afd-45d1-b261-f1224678f6d6] Pending
helpers_test.go:352: "busybox-mount" [52129871-8afd-45d1-b261-f1224678f6d6] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:352: "busybox-mount" [52129871-8afd-45d1-b261-f1224678f6d6] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:352: "busybox-mount" [52129871-8afd-45d1-b261-f1224678f6d6] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 5.003983478s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-918451 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdany-port2559124128/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (7.64s)
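The retry.go lines above show the test polling "findmnt -T /mount-9p" with a short backoff until the 9p mount appears. A compact Go sketch of that poll-and-retry pattern, using a fixed delay rather than the randomized backoff in the log:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForMount polls findmnt until the target shows up or attempts run out,
// roughly mirroring the retry behaviour recorded above.
func waitForMount(target string, attempts int, delay time.Duration) error {
	for i := 0; i < attempts; i++ {
		if err := exec.Command("findmnt", "-T", target).Run(); err == nil {
			return nil
		}
		fmt.Printf("mount %s not ready, retrying in %s\n", target, delay)
		time.Sleep(delay)
	}
	return fmt.Errorf("%s never appeared after %d attempts", target, attempts)
}

func main() {
	if err := waitForMount("/mount-9p", 10, 300*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}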

                                                
                                    
TestFunctional/parallel/MountCmd/specific-port (1.97s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdspecific-port2141867240/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (337.943436ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I0917 00:46:38.534711  578284 retry.go:31] will retry after 578.076933ms: exit status 1
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:257: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh -- ls -la /mount-9p
functional_test_mount_test.go:261: guest mount directory contents
total 0
functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdspecific-port2141867240/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:264: reading mount text
functional_test_mount_test.go:278: done reading mount text
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh "sudo umount -f /mount-9p": exit status 1 (298.896647ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-arm64 -p functional-918451 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdspecific-port2141867240/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
--- PASS: TestFunctional/parallel/MountCmd/specific-port (1.97s)

                                                
                                    
TestFunctional/parallel/MountCmd/VerifyCleanup (1.95s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T" /mount1: exit status 1 (582.011985ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I0917 00:46:40.750280  578284 retry.go:31] will retry after 429.224276ms: exit status 1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-arm64 mount -p functional-918451 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-918451 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3591012543/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (1.95s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/List (1.31s)

=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1469: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 service list
functional_test.go:1469: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 service list: (1.310818912s)
--- PASS: TestFunctional/parallel/ServiceCmd/List (1.31s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/JSONOutput (1.33s)

=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1499: (dbg) Run:  out/minikube-linux-arm64 -p functional-918451 service list -o json
functional_test.go:1499: (dbg) Done: out/minikube-linux-arm64 -p functional-918451 service list -o json: (1.327394189s)
functional_test.go:1504: Took "1.327468288s" to run "out/minikube-linux-arm64 -p functional-918451 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (1.33s)

                                                
                                    
TestFunctional/delete_echo-server_images (0.04s)

=== RUN   TestFunctional/delete_echo-server_images
functional_test.go:205: (dbg) Run:  docker rmi -f kicbase/echo-server:1.0
functional_test.go:205: (dbg) Run:  docker rmi -f kicbase/echo-server:functional-918451
--- PASS: TestFunctional/delete_echo-server_images (0.04s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

=== RUN   TestFunctional/delete_my-image_image
functional_test.go:213: (dbg) Run:  docker rmi -f localhost/my-image:functional-918451
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.02s)

=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:221: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-918451
--- PASS: TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                    
TestMultiControlPlane/serial/StartCluster (136.91s)

=== RUN   TestMultiControlPlane/serial/StartCluster
ha_test.go:101: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker  --container-runtime=docker
E0917 00:53:52.992650  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:101: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker  --container-runtime=docker: (2m16.050528704s)
ha_test.go:107: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/StartCluster (136.91s)

                                                
                                    
TestMultiControlPlane/serial/DeployApp (40.11s)

=== RUN   TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- rollout status deployment/busybox
ha_test.go:133: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 kubectl -- rollout status deployment/busybox: (4.975737073s)
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:10.475417  578284 retry.go:31] will retry after 1.074279714s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:11.734295  578284 retry.go:31] will retry after 2.028781581s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:13.913796  578284 retry.go:31] will retry after 1.599602479s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:15.697468  578284 retry.go:31] will retry after 3.136089232s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:18.982672  578284 retry.go:31] will retry after 3.131502271s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:22.292599  578284 retry.go:31] will retry after 8.247135504s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0917 00:54:30.702339  578284 retry.go:31] will retry after 11.553258557s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:163: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-bvkpg -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-qsdpg -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-r8p9q -- nslookup kubernetes.io
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-bvkpg -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-qsdpg -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-r8p9q -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-bvkpg -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-qsdpg -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-r8p9q -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiControlPlane/serial/DeployApp (40.11s)

                                                
                                    
TestMultiControlPlane/serial/PingHostFromPods (1.78s)

=== RUN   TestMultiControlPlane/serial/PingHostFromPods
ha_test.go:199: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-bvkpg -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-bvkpg -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-qsdpg -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-qsdpg -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-r8p9q -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 kubectl -- exec busybox-7b57f96db7-r8p9q -- sh -c "ping -c 1 192.168.49.1"
--- PASS: TestMultiControlPlane/serial/PingHostFromPods (1.78s)

                                                
                                    
TestMultiControlPlane/serial/AddWorkerNode (19.57s)

=== RUN   TestMultiControlPlane/serial/AddWorkerNode
ha_test.go:228: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node add --alsologtostderr -v 5
ha_test.go:228: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 node add --alsologtostderr -v 5: (17.999525478s)
ha_test.go:234: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:234: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5: (1.569324535s)
--- PASS: TestMultiControlPlane/serial/AddWorkerNode (19.57s)

                                                
                                    
TestMultiControlPlane/serial/NodeLabels (0.17s)

=== RUN   TestMultiControlPlane/serial/NodeLabels
ha_test.go:255: (dbg) Run:  kubectl --context ha-956147 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiControlPlane/serial/NodeLabels (0.17s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterClusterStart (1.3s)

=== RUN   TestMultiControlPlane/serial/HAppyAfterClusterStart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
ha_test.go:281: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (1.300225033s)
--- PASS: TestMultiControlPlane/serial/HAppyAfterClusterStart (1.30s)

                                                
                                    
TestMultiControlPlane/serial/CopyFile (20.37s)

=== RUN   TestMultiControlPlane/serial/CopyFile
ha_test.go:328: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --output json --alsologtostderr -v 5
ha_test.go:328: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 status --output json --alsologtostderr -v 5: (1.25759031s)
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp testdata/cp-test.txt ha-956147:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile129101577/001/cp-test_ha-956147.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147:/home/docker/cp-test.txt ha-956147-m02:/home/docker/cp-test_ha-956147_ha-956147-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test_ha-956147_ha-956147-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147:/home/docker/cp-test.txt ha-956147-m03:/home/docker/cp-test_ha-956147_ha-956147-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test_ha-956147_ha-956147-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147:/home/docker/cp-test.txt ha-956147-m04:/home/docker/cp-test_ha-956147_ha-956147-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test_ha-956147_ha-956147-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp testdata/cp-test.txt ha-956147-m02:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m02:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile129101577/001/cp-test_ha-956147-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m02:/home/docker/cp-test.txt ha-956147:/home/docker/cp-test_ha-956147-m02_ha-956147.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test_ha-956147-m02_ha-956147.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m02:/home/docker/cp-test.txt ha-956147-m03:/home/docker/cp-test_ha-956147-m02_ha-956147-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test_ha-956147-m02_ha-956147-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m02:/home/docker/cp-test.txt ha-956147-m04:/home/docker/cp-test_ha-956147-m02_ha-956147-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test_ha-956147-m02_ha-956147-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp testdata/cp-test.txt ha-956147-m03:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m03:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile129101577/001/cp-test_ha-956147-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m03:/home/docker/cp-test.txt ha-956147:/home/docker/cp-test_ha-956147-m03_ha-956147.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test_ha-956147-m03_ha-956147.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m03:/home/docker/cp-test.txt ha-956147-m02:/home/docker/cp-test_ha-956147-m03_ha-956147-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test_ha-956147-m03_ha-956147-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m03:/home/docker/cp-test.txt ha-956147-m04:/home/docker/cp-test_ha-956147-m03_ha-956147-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test_ha-956147-m03_ha-956147-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp testdata/cp-test.txt ha-956147-m04:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m04:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile129101577/001/cp-test_ha-956147-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m04:/home/docker/cp-test.txt ha-956147:/home/docker/cp-test_ha-956147-m04_ha-956147.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147 "sudo cat /home/docker/cp-test_ha-956147-m04_ha-956147.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m04:/home/docker/cp-test.txt ha-956147-m02:/home/docker/cp-test_ha-956147-m04_ha-956147-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m02 "sudo cat /home/docker/cp-test_ha-956147-m04_ha-956147-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 cp ha-956147-m04:/home/docker/cp-test.txt ha-956147-m03:/home/docker/cp-test_ha-956147-m04_ha-956147-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 ssh -n ha-956147-m03 "sudo cat /home/docker/cp-test_ha-956147-m04_ha-956147-m03.txt"
--- PASS: TestMultiControlPlane/serial/CopyFile (20.37s)

                                                
                                    
TestMultiControlPlane/serial/StopSecondaryNode (11.75s)

=== RUN   TestMultiControlPlane/serial/StopSecondaryNode
ha_test.go:365: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node stop m02 --alsologtostderr -v 5
ha_test.go:365: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 node stop m02 --alsologtostderr -v 5: (10.981269773s)
ha_test.go:371: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:371: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5: exit status 7 (766.180148ms)

                                                
                                                
-- stdout --
	ha-956147
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-956147-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-956147-m03
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-956147-m04
	type: Worker
	host: Running
	kubelet: Running
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0917 00:55:39.403304  654484 out.go:360] Setting OutFile to fd 1 ...
	I0917 00:55:39.403634  654484 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:55:39.403648  654484 out.go:374] Setting ErrFile to fd 2...
	I0917 00:55:39.403653  654484 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 00:55:39.403956  654484 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 00:55:39.404174  654484 out.go:368] Setting JSON to false
	I0917 00:55:39.404219  654484 mustload.go:65] Loading cluster: ha-956147
	I0917 00:55:39.404317  654484 notify.go:220] Checking for updates...
	I0917 00:55:39.404774  654484 config.go:182] Loaded profile config "ha-956147": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 00:55:39.404802  654484 status.go:174] checking status of ha-956147 ...
	I0917 00:55:39.405809  654484 cli_runner.go:164] Run: docker container inspect ha-956147 --format={{.State.Status}}
	I0917 00:55:39.435899  654484 status.go:371] ha-956147 host status = "Running" (err=<nil>)
	I0917 00:55:39.435923  654484 host.go:66] Checking if "ha-956147" exists ...
	I0917 00:55:39.436428  654484 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-956147
	I0917 00:55:39.461068  654484 host.go:66] Checking if "ha-956147" exists ...
	I0917 00:55:39.461444  654484 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:55:39.461500  654484 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-956147
	I0917 00:55:39.481171  654484 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33520 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/ha-956147/id_rsa Username:docker}
	I0917 00:55:39.581829  654484 ssh_runner.go:195] Run: systemctl --version
	I0917 00:55:39.586019  654484 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:55:39.597215  654484 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 00:55:39.654396  654484 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:62 OomKillDisable:true NGoroutines:72 SystemTime:2025-09-17 00:55:39.644964568 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 00:55:39.654960  654484 kubeconfig.go:125] found "ha-956147" server: "https://192.168.49.254:8443"
	I0917 00:55:39.655002  654484 api_server.go:166] Checking apiserver status ...
	I0917 00:55:39.655047  654484 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:55:39.668586  654484 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2213/cgroup
	I0917 00:55:39.679181  654484 api_server.go:182] apiserver freezer: "3:freezer:/docker/67bf306d7f85ce1708cc553d50bfe3e3a253a6ee2e6996f5969e97a8c131391c/kubepods/burstable/podeb34c956f858ba733995bb3474f36522/360647f5669afe6c9f4021511c1962b94df6cd434035c5ad48cf347f94198b4a"
	I0917 00:55:39.679246  654484 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/67bf306d7f85ce1708cc553d50bfe3e3a253a6ee2e6996f5969e97a8c131391c/kubepods/burstable/podeb34c956f858ba733995bb3474f36522/360647f5669afe6c9f4021511c1962b94df6cd434035c5ad48cf347f94198b4a/freezer.state
	I0917 00:55:39.687831  654484 api_server.go:204] freezer state: "THAWED"
	I0917 00:55:39.687861  654484 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0917 00:55:39.695972  654484 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0917 00:55:39.696000  654484 status.go:463] ha-956147 apiserver status = Running (err=<nil>)
	I0917 00:55:39.696010  654484 status.go:176] ha-956147 status: &{Name:ha-956147 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 00:55:39.696026  654484 status.go:174] checking status of ha-956147-m02 ...
	I0917 00:55:39.696327  654484 cli_runner.go:164] Run: docker container inspect ha-956147-m02 --format={{.State.Status}}
	I0917 00:55:39.714871  654484 status.go:371] ha-956147-m02 host status = "Stopped" (err=<nil>)
	I0917 00:55:39.714895  654484 status.go:384] host is not running, skipping remaining checks
	I0917 00:55:39.714908  654484 status.go:176] ha-956147-m02 status: &{Name:ha-956147-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 00:55:39.714929  654484 status.go:174] checking status of ha-956147-m03 ...
	I0917 00:55:39.715300  654484 cli_runner.go:164] Run: docker container inspect ha-956147-m03 --format={{.State.Status}}
	I0917 00:55:39.738168  654484 status.go:371] ha-956147-m03 host status = "Running" (err=<nil>)
	I0917 00:55:39.738191  654484 host.go:66] Checking if "ha-956147-m03" exists ...
	I0917 00:55:39.738490  654484 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-956147-m03
	I0917 00:55:39.757239  654484 host.go:66] Checking if "ha-956147-m03" exists ...
	I0917 00:55:39.757548  654484 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:55:39.757593  654484 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-956147-m03
	I0917 00:55:39.775542  654484 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33530 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/ha-956147-m03/id_rsa Username:docker}
	I0917 00:55:39.869767  654484 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:55:39.883239  654484 kubeconfig.go:125] found "ha-956147" server: "https://192.168.49.254:8443"
	I0917 00:55:39.883266  654484 api_server.go:166] Checking apiserver status ...
	I0917 00:55:39.883306  654484 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 00:55:39.895256  654484 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2170/cgroup
	I0917 00:55:39.905511  654484 api_server.go:182] apiserver freezer: "3:freezer:/docker/2212e07837a64d3d61323f21990a2cd62d9a813315ba89984288e639ad786dc3/kubepods/burstable/pod0f651e0e962490313dbc4cde25457b96/4d8289afeffbf35dfa84d7d8b89481d9e3d6265bfca4f04c2e5be44281dc526d"
	I0917 00:55:39.905593  654484 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/2212e07837a64d3d61323f21990a2cd62d9a813315ba89984288e639ad786dc3/kubepods/burstable/pod0f651e0e962490313dbc4cde25457b96/4d8289afeffbf35dfa84d7d8b89481d9e3d6265bfca4f04c2e5be44281dc526d/freezer.state
	I0917 00:55:39.914574  654484 api_server.go:204] freezer state: "THAWED"
	I0917 00:55:39.914602  654484 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0917 00:55:39.923207  654484 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0917 00:55:39.923299  654484 status.go:463] ha-956147-m03 apiserver status = Running (err=<nil>)
	I0917 00:55:39.923322  654484 status.go:176] ha-956147-m03 status: &{Name:ha-956147-m03 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 00:55:39.923364  654484 status.go:174] checking status of ha-956147-m04 ...
	I0917 00:55:39.923701  654484 cli_runner.go:164] Run: docker container inspect ha-956147-m04 --format={{.State.Status}}
	I0917 00:55:39.945748  654484 status.go:371] ha-956147-m04 host status = "Running" (err=<nil>)
	I0917 00:55:39.945772  654484 host.go:66] Checking if "ha-956147-m04" exists ...
	I0917 00:55:39.946065  654484 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-956147-m04
	I0917 00:55:39.962976  654484 host.go:66] Checking if "ha-956147-m04" exists ...
	I0917 00:55:39.963264  654484 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 00:55:39.963301  654484 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-956147-m04
	I0917 00:55:39.980931  654484 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33535 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/ha-956147-m04/id_rsa Username:docker}
	I0917 00:55:40.095370  654484 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 00:55:40.111464  654484 status.go:176] ha-956147-m04 status: &{Name:ha-956147-m04 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopSecondaryNode (11.75s)

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.81s)

=== RUN   TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop
ha_test.go:392: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.81s)

                                                
                                    
TestMultiControlPlane/serial/RestartSecondaryNode (45.24s)

=== RUN   TestMultiControlPlane/serial/RestartSecondaryNode
ha_test.go:422: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node start m02 --alsologtostderr -v 5
E0917 00:56:17.252807  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.259142  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.271009  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.292433  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.333756  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.415496  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.577122  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:17.898635  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:18.540399  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:19.822593  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:22.383991  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:422: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 node start m02 --alsologtostderr -v 5: (43.668243179s)
ha_test.go:430: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:430: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5: (1.430724705s)
ha_test.go:450: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiControlPlane/serial/RestartSecondaryNode (45.24s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (1.12s)

=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
ha_test.go:281: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (1.115614712s)
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (1.12s)

                                                
                                    
TestMultiControlPlane/serial/RestartClusterKeepsNodes (205.13s)

=== RUN   TestMultiControlPlane/serial/RestartClusterKeepsNodes
ha_test.go:458: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node list --alsologtostderr -v 5
ha_test.go:464: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 stop --alsologtostderr -v 5
E0917 00:56:27.505715  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:37.747171  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:56:58.228809  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:464: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 stop --alsologtostderr -v 5: (33.737384823s)
ha_test.go:469: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 start --wait true --alsologtostderr -v 5
E0917 00:57:39.190438  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:58:52.992067  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:59:01.111940  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:469: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 start --wait true --alsologtostderr -v 5: (2m51.233514233s)
ha_test.go:474: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node list --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/RestartClusterKeepsNodes (205.13s)

                                                
                                    
TestMultiControlPlane/serial/DeleteSecondaryNode (11.83s)

=== RUN   TestMultiControlPlane/serial/DeleteSecondaryNode
ha_test.go:489: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node delete m03 --alsologtostderr -v 5
ha_test.go:489: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 node delete m03 --alsologtostderr -v 5: (10.819058228s)
ha_test.go:495: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:513: (dbg) Run:  kubectl get nodes
ha_test.go:521: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/DeleteSecondaryNode (11.83s)

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.8s)

=== RUN   TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete
ha_test.go:392: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.80s)

                                                
                                    
TestMultiControlPlane/serial/StopCluster (32.83s)

=== RUN   TestMultiControlPlane/serial/StopCluster
ha_test.go:533: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 stop --alsologtostderr -v 5
ha_test.go:533: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 stop --alsologtostderr -v 5: (32.711299902s)
ha_test.go:539: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:539: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5: exit status 7 (115.57559ms)

                                                
                                                
-- stdout --
	ha-956147
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-956147-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-956147-m04
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0917 01:00:37.802009  682024 out.go:360] Setting OutFile to fd 1 ...
	I0917 01:00:37.802193  682024 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:00:37.802222  682024 out.go:374] Setting ErrFile to fd 2...
	I0917 01:00:37.802245  682024 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:00:37.802519  682024 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 01:00:37.802739  682024 out.go:368] Setting JSON to false
	I0917 01:00:37.802799  682024 mustload.go:65] Loading cluster: ha-956147
	I0917 01:00:37.802886  682024 notify.go:220] Checking for updates...
	I0917 01:00:37.803265  682024 config.go:182] Loaded profile config "ha-956147": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 01:00:37.803307  682024 status.go:174] checking status of ha-956147 ...
	I0917 01:00:37.804137  682024 cli_runner.go:164] Run: docker container inspect ha-956147 --format={{.State.Status}}
	I0917 01:00:37.822192  682024 status.go:371] ha-956147 host status = "Stopped" (err=<nil>)
	I0917 01:00:37.822217  682024 status.go:384] host is not running, skipping remaining checks
	I0917 01:00:37.822224  682024 status.go:176] ha-956147 status: &{Name:ha-956147 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 01:00:37.822254  682024 status.go:174] checking status of ha-956147-m02 ...
	I0917 01:00:37.822554  682024 cli_runner.go:164] Run: docker container inspect ha-956147-m02 --format={{.State.Status}}
	I0917 01:00:37.852142  682024 status.go:371] ha-956147-m02 host status = "Stopped" (err=<nil>)
	I0917 01:00:37.852164  682024 status.go:384] host is not running, skipping remaining checks
	I0917 01:00:37.852170  682024 status.go:176] ha-956147-m02 status: &{Name:ha-956147-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 01:00:37.852187  682024 status.go:174] checking status of ha-956147-m04 ...
	I0917 01:00:37.852487  682024 cli_runner.go:164] Run: docker container inspect ha-956147-m04 --format={{.State.Status}}
	I0917 01:00:37.869896  682024 status.go:371] ha-956147-m04 host status = "Stopped" (err=<nil>)
	I0917 01:00:37.869918  682024 status.go:384] host is not running, skipping remaining checks
	I0917 01:00:37.869939  682024 status.go:176] ha-956147-m04 status: &{Name:ha-956147-m04 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopCluster (32.83s)

                                                
                                    
TestMultiControlPlane/serial/RestartCluster (108.93s)

=== RUN   TestMultiControlPlane/serial/RestartCluster
ha_test.go:562: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 start --wait true --alsologtostderr -v 5 --driver=docker  --container-runtime=docker
E0917 01:01:17.254643  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:01:44.953250  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:562: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 start --wait true --alsologtostderr -v 5 --driver=docker  --container-runtime=docker: (1m47.910242293s)
ha_test.go:568: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:586: (dbg) Run:  kubectl get nodes
ha_test.go:594: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/RestartCluster (108.93s)

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.82s)

=== RUN   TestMultiControlPlane/serial/DegradedAfterClusterRestart
ha_test.go:392: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.82s)

                                                
                                    
TestMultiControlPlane/serial/AddSecondaryNode (52.34s)

=== RUN   TestMultiControlPlane/serial/AddSecondaryNode
ha_test.go:607: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 node add --control-plane --alsologtostderr -v 5
ha_test.go:607: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 node add --control-plane --alsologtostderr -v 5: (50.755918376s)
ha_test.go:613: (dbg) Run:  out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5
ha_test.go:613: (dbg) Done: out/minikube-linux-arm64 -p ha-956147 status --alsologtostderr -v 5: (1.584057093s)
--- PASS: TestMultiControlPlane/serial/AddSecondaryNode (52.34s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (1.4s)

=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
ha_test.go:281: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (1.403008481s)
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (1.40s)

                                                
                                    
TestImageBuild/serial/Setup (32.38s)

=== RUN   TestImageBuild/serial/Setup
image_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -p image-796025 --driver=docker  --container-runtime=docker
E0917 01:03:52.992848  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
image_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -p image-796025 --driver=docker  --container-runtime=docker: (32.384399661s)
--- PASS: TestImageBuild/serial/Setup (32.38s)

                                                
                                    
TestImageBuild/serial/NormalBuild (1.78s)

=== RUN   TestImageBuild/serial/NormalBuild
image_test.go:78: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-796025
image_test.go:78: (dbg) Done: out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-796025: (1.777537132s)
--- PASS: TestImageBuild/serial/NormalBuild (1.78s)

                                                
                                    
TestImageBuild/serial/BuildWithBuildArg (0.96s)

=== RUN   TestImageBuild/serial/BuildWithBuildArg
image_test.go:99: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-796025
--- PASS: TestImageBuild/serial/BuildWithBuildArg (0.96s)

                                                
                                    
TestImageBuild/serial/BuildWithDockerIgnore (0.9s)

=== RUN   TestImageBuild/serial/BuildWithDockerIgnore
image_test.go:133: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal --build-opt=no-cache -p image-796025
--- PASS: TestImageBuild/serial/BuildWithDockerIgnore (0.90s)

                                                
                                    
TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.91s)

=== RUN   TestImageBuild/serial/BuildWithSpecifiedDockerfile
image_test.go:88: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-796025
--- PASS: TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.91s)

                                                
                                    
TestJSONOutput/start/Command (77.92s)

=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-613494 --output=json --user=testUser --memory=3072 --wait=true --driver=docker  --container-runtime=docker
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 start -p json-output-613494 --output=json --user=testUser --memory=3072 --wait=true --driver=docker  --container-runtime=docker: (1m17.91893802s)
--- PASS: TestJSONOutput/start/Command (77.92s)

                                                
                                    
TestJSONOutput/start/Audit (0s)

=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/Command (0.61s)

=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 pause -p json-output-613494 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.61s)

TestJSONOutput/pause/Audit (0s)

=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps

=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps

=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/unpause/Command (0.52s)

=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 unpause -p json-output-613494 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.52s)

TestJSONOutput/unpause/Audit (0s)

=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps

=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps

=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/stop/Command (5.79s)

=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 stop -p json-output-613494 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 stop -p json-output-613494 --output=json --user=testUser: (5.792347599s)
--- PASS: TestJSONOutput/stop/Command (5.79s)

TestJSONOutput/stop/Audit (0s)

=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)

=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps

=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)

=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps

=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

TestErrorJSONOutput (0.23s)

=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-error-480799 --memory=3072 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p json-output-error-480799 --memory=3072 --output=json --wait=true --driver=fail: exit status 56 (93.631639ms)

-- stdout --
	{"specversion":"1.0","id":"bc251b3e-1c8e-4241-9d0c-e427d3a72a07","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-480799] minikube v1.37.0 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"c9c643c1-eb16-480e-9af9-dfc3780ab516","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=21550"}}
	{"specversion":"1.0","id":"b4be1619-c5d0-4f16-b5ab-a61d0e8f0d71","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"f17ffc55-7c50-40a6-a882-49b89d30e63d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig"}}
	{"specversion":"1.0","id":"4ef77221-972a-4a0a-af00-b9f93f5baf25","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube"}}
	{"specversion":"1.0","id":"89f5bc46-829a-46b6-a6f0-1584c66049e3","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"cd80a448-f84c-4cd7-b58e-c113688e329d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"4d61c5df-d470-4e7b-a834-d8d4e7daefb5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/arm64","name":"DRV_UNSUPPORTED_OS","url":""}}

-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-480799" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p json-output-error-480799
--- PASS: TestErrorJSONOutput (0.23s)
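
Note: the stdout above is a stream of newline-delimited CloudEvents, one JSON object per line, with the failure reported as an event of type "io.k8s.sigs.minikube.error" that carries the exit code. Below is a minimal, illustrative Go reader for such a stream; only the event type string and the data field names are taken from the output above, while the program structure and names are assumptions, not minikube's own JSON handling.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// cloudEvent holds just the fields needed from minikube's --output=json
// lines, as seen in the stdout block above (illustrative, not exhaustive).
type cloudEvent struct {
	Type string            `json:"type"`
	Data map[string]string `json:"data"`
}

func main() {
	// e.g.  minikube start --output=json ... | this-program
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		var ev cloudEvent
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil {
			continue // ignore lines that are not JSON events
		}
		if ev.Type == "io.k8s.sigs.minikube.error" {
			fmt.Printf("error %s (exit code %s): %s\n",
				ev.Data["name"], ev.Data["exitcode"], ev.Data["message"])
		}
	}
}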

TestKicCustomNetwork/create_custom_network (33.53s)

=== RUN   TestKicCustomNetwork/create_custom_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-232585 --network=
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-232585 --network=: (31.788865025s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-232585" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-232585
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-232585: (1.720572914s)
--- PASS: TestKicCustomNetwork/create_custom_network (33.53s)

TestKicCustomNetwork/use_default_bridge_network (33.88s)

=== RUN   TestKicCustomNetwork/use_default_bridge_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-645561 --network=bridge
E0917 01:06:17.252580  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-645561 --network=bridge: (31.844689504s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-645561" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-645561
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-645561: (2.017391325s)
--- PASS: TestKicCustomNetwork/use_default_bridge_network (33.88s)

TestKicExistingNetwork (34.95s)

=== RUN   TestKicExistingNetwork
I0917 01:06:43.801457  578284 cli_runner.go:164] Run: docker network inspect existing-network --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0917 01:06:43.817674  578284 cli_runner.go:211] docker network inspect existing-network --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0917 01:06:43.817757  578284 network_create.go:284] running [docker network inspect existing-network] to gather additional debugging logs...
I0917 01:06:43.817774  578284 cli_runner.go:164] Run: docker network inspect existing-network
W0917 01:06:43.833701  578284 cli_runner.go:211] docker network inspect existing-network returned with exit code 1
I0917 01:06:43.833731  578284 network_create.go:287] error running [docker network inspect existing-network]: docker network inspect existing-network: exit status 1
stdout:
[]

stderr:
Error response from daemon: network existing-network not found
I0917 01:06:43.833745  578284 network_create.go:289] output of [docker network inspect existing-network]: -- stdout --
[]

-- /stdout --
** stderr ** 
Error response from daemon: network existing-network not found

** /stderr **
I0917 01:06:43.833860  578284 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0917 01:06:43.849960  578284 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-f3f8de7032cd IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:0a:49:ee:b5:0e:1a} reservation:<nil>}
I0917 01:06:43.850242  578284 network.go:206] using free private subnet 192.168.58.0/24: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40015832d0}
I0917 01:06:43.850264  578284 network_create.go:124] attempt to create docker network existing-network 192.168.58.0/24 with gateway 192.168.58.1 and MTU of 1500 ...
I0917 01:06:43.850315  578284 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.58.0/24 --gateway=192.168.58.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=existing-network existing-network
I0917 01:06:43.918622  578284 network_create.go:108] docker network existing-network 192.168.58.0/24 created
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
kic_custom_network_test.go:93: (dbg) Run:  out/minikube-linux-arm64 start -p existing-network-646879 --network=existing-network
E0917 01:06:56.067121  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-arm64 start -p existing-network-646879 --network=existing-network: (32.817233114s)
helpers_test.go:175: Cleaning up "existing-network-646879" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p existing-network-646879
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p existing-network-646879: (1.978688512s)
I0917 01:07:18.732583  578284 cli_runner.go:164] Run: docker network ls --filter=label=existing-network --format {{.Name}}
--- PASS: TestKicExistingNetwork (34.95s)
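
Note: the network_create lines above show the subnet scan: 192.168.49.0/24 is already held by the default minikube bridge, so the next candidate, 192.168.58.0/24, is picked for the new network. A minimal Go sketch of that kind of scan follows; the starting octet and the step of 9 are read off this log, and the helper itself is illustrative rather than minikube's actual network.go logic.

package main

import "fmt"

// firstFreeSubnet walks /24 candidates (192.168.49.0, .58.0, .67.0, ...)
// and returns the first one not already in use, mirroring the
// 49.0/24 -> 58.0/24 hop visible in the log above.
func firstFreeSubnet(taken map[string]bool) (string, error) {
	for octet := 49; octet <= 254; octet += 9 {
		subnet := fmt.Sprintf("192.168.%d.0/24", octet)
		if !taken[subnet] {
			return subnet, nil
		}
	}
	return "", fmt.Errorf("no free private /24 candidate left")
}

func main() {
	// 192.168.49.0/24 is taken by the existing bridge (br-f3f8de7032cd above).
	taken := map[string]bool{"192.168.49.0/24": true}
	subnet, err := firstFreeSubnet(taken)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("using free private subnet", subnet) // prints 192.168.58.0/24
}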

TestKicCustomSubnet (34.44s)

=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-subnet-382566 --subnet=192.168.60.0/24
kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-subnet-382566 --subnet=192.168.60.0/24: (32.259093539s)
kic_custom_network_test.go:161: (dbg) Run:  docker network inspect custom-subnet-382566 --format "{{(index .IPAM.Config 0).Subnet}}"
helpers_test.go:175: Cleaning up "custom-subnet-382566" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p custom-subnet-382566
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p custom-subnet-382566: (2.148199646s)
--- PASS: TestKicCustomSubnet (34.44s)

TestKicStaticIP (30.68s)

=== RUN   TestKicStaticIP
kic_custom_network_test.go:132: (dbg) Run:  out/minikube-linux-arm64 start -p static-ip-305589 --static-ip=192.168.200.200
kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-arm64 start -p static-ip-305589 --static-ip=192.168.200.200: (28.429078613s)
kic_custom_network_test.go:138: (dbg) Run:  out/minikube-linux-arm64 -p static-ip-305589 ip
helpers_test.go:175: Cleaning up "static-ip-305589" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p static-ip-305589
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p static-ip-305589: (2.102257878s)
--- PASS: TestKicStaticIP (30.68s)

TestMainNoArgs (0.05s)

=== RUN   TestMainNoArgs
main_test.go:70: (dbg) Run:  out/minikube-linux-arm64
--- PASS: TestMainNoArgs (0.05s)

TestMinikubeProfile (72.25s)

=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p first-769221 --driver=docker  --container-runtime=docker
E0917 01:08:52.992573  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p first-769221 --driver=docker  --container-runtime=docker: (34.285375482s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p second-775416 --driver=docker  --container-runtime=docker
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p second-775416 --driver=docker  --container-runtime=docker: (32.40236777s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile first-769221
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile second-775416
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
helpers_test.go:175: Cleaning up "second-775416" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p second-775416
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p second-775416: (2.063570041s)
helpers_test.go:175: Cleaning up "first-769221" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p first-769221
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p first-769221: (2.111639663s)
--- PASS: TestMinikubeProfile (72.25s)

TestMountStart/serial/StartWithMountFirst (7.67s)

=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:118: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-1-394764 --memory=3072 --mount-string /tmp/TestMountStartserial3229832978/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:118: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-1-394764 --memory=3072 --mount-string /tmp/TestMountStartserial3229832978/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (6.674642976s)
--- PASS: TestMountStart/serial/StartWithMountFirst (7.67s)

TestMountStart/serial/VerifyMountFirst (0.26s)

=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-1-394764 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.26s)

TestMountStart/serial/StartWithMountSecond (8.04s)

=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:118: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-396462 --memory=3072 --mount-string /tmp/TestMountStartserial3229832978/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:118: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-396462 --memory=3072 --mount-string /tmp/TestMountStartserial3229832978/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (7.042908188s)
--- PASS: TestMountStart/serial/StartWithMountSecond (8.04s)

TestMountStart/serial/VerifyMountSecond (0.27s)

=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-396462 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.27s)

TestMountStart/serial/DeleteFirst (1.45s)

=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p mount-start-1-394764 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p mount-start-1-394764 --alsologtostderr -v=5: (1.446828712s)
--- PASS: TestMountStart/serial/DeleteFirst (1.45s)

TestMountStart/serial/VerifyMountPostDelete (0.28s)

=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-396462 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.28s)

TestMountStart/serial/Stop (1.19s)

=== RUN   TestMountStart/serial/Stop
mount_start_test.go:196: (dbg) Run:  out/minikube-linux-arm64 stop -p mount-start-2-396462
mount_start_test.go:196: (dbg) Done: out/minikube-linux-arm64 stop -p mount-start-2-396462: (1.194481218s)
--- PASS: TestMountStart/serial/Stop (1.19s)

TestMountStart/serial/RestartStopped (8.64s)

=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:207: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-396462
mount_start_test.go:207: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-396462: (7.63573318s)
--- PASS: TestMountStart/serial/RestartStopped (8.64s)

TestMountStart/serial/VerifyMountPostStop (0.27s)

=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-396462 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.27s)

TestMultiNode/serial/FreshStart2Nodes (72.88s)

=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:96: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-360800 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=docker  --container-runtime=docker
E0917 01:11:17.252895  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:96: (dbg) Done: out/minikube-linux-arm64 start -p multinode-360800 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=docker  --container-runtime=docker: (1m12.380983713s)
multinode_test.go:102: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (72.88s)

TestMultiNode/serial/DeployApp2Nodes (47.02s)

=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:493: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:498: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- rollout status deployment/busybox
multinode_test.go:498: (dbg) Done: out/minikube-linux-arm64 kubectl -p multinode-360800 -- rollout status deployment/busybox: (4.127081794s)
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:23.266277  578284 retry.go:31] will retry after 852.593581ms: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:24.291279  578284 retry.go:31] will retry after 2.197717689s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:26.633170  578284 retry.go:31] will retry after 2.438502857s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:29.226687  578284 retry.go:31] will retry after 4.216093766s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:33.585287  578284 retry.go:31] will retry after 2.873937834s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:36.605429  578284 retry.go:31] will retry after 6.747414046s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:43.499426  578284 retry.go:31] will retry after 9.52724323s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
I0917 01:11:53.180593  578284 retry.go:31] will retry after 11.014703867s: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:528: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-str69 -- nslookup kubernetes.io
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-xj547 -- nslookup kubernetes.io
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-str69 -- nslookup kubernetes.default
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-xj547 -- nslookup kubernetes.default
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-str69 -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-xj547 -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (47.02s)
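
Note: the repeated jsonpath queries above are the test polling until both busybox Pods report an IP, backing off between attempts. A minimal sketch of that poll-and-retry pattern follows; the jsonpath expression is the one from the log, while the kubectl --context invocation, the helper name, and the doubling backoff schedule are illustrative assumptions, not the test's own retry.go.

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForPodIPs polls kubectl until at least `want` Pod IPs are reported
// or the timeout expires, doubling the delay between attempts.
func waitForPodIPs(context string, want int, timeout time.Duration) ([]string, error) {
	deadline := time.Now().Add(timeout)
	delay := time.Second
	for {
		out, err := exec.Command("kubectl", "--context", context,
			"get", "pods", "-o", "jsonpath={.items[*].status.podIP}").Output()
		if err == nil {
			if ips := strings.Fields(string(out)); len(ips) >= want {
				return ips, nil
			}
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("timed out waiting for %d Pod IPs", want)
		}
		time.Sleep(delay)
		delay *= 2
	}
}

func main() {
	ips, err := waitForPodIPs("multinode-360800", 2, 2*time.Minute)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("pod IPs:", ips)
}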

TestMultiNode/serial/PingHostFrom2Pods (0.98s)

=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:564: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-str69 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-str69 -- sh -c "ping -c 1 192.168.67.1"
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-xj547 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-360800 -- exec busybox-7b57f96db7-xj547 -- sh -c "ping -c 1 192.168.67.1"
--- PASS: TestMultiNode/serial/PingHostFrom2Pods (0.98s)

TestMultiNode/serial/AddNode (14.92s)

=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:121: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-360800 -v=5 --alsologtostderr
multinode_test.go:121: (dbg) Done: out/minikube-linux-arm64 node add -p multinode-360800 -v=5 --alsologtostderr: (14.260417802s)
multinode_test.go:127: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (14.92s)

TestMultiNode/serial/MultiNodeLabels (0.1s)

=== RUN   TestMultiNode/serial/MultiNodeLabels
multinode_test.go:221: (dbg) Run:  kubectl --context multinode-360800 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiNode/serial/MultiNodeLabels (0.10s)

TestMultiNode/serial/ProfileList (0.86s)

=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:143: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.86s)

TestMultiNode/serial/CopyFile (10.93s)

=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:184: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --output json --alsologtostderr
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp testdata/cp-test.txt multinode-360800:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile3972739023/001/cp-test_multinode-360800.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800:/home/docker/cp-test.txt multinode-360800-m02:/home/docker/cp-test_multinode-360800_multinode-360800-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test_multinode-360800_multinode-360800-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800:/home/docker/cp-test.txt multinode-360800-m03:/home/docker/cp-test_multinode-360800_multinode-360800-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test_multinode-360800_multinode-360800-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp testdata/cp-test.txt multinode-360800-m02:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile3972739023/001/cp-test_multinode-360800-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m02:/home/docker/cp-test.txt multinode-360800:/home/docker/cp-test_multinode-360800-m02_multinode-360800.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test_multinode-360800-m02_multinode-360800.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m02:/home/docker/cp-test.txt multinode-360800-m03:/home/docker/cp-test_multinode-360800-m02_multinode-360800-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test_multinode-360800-m02_multinode-360800-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp testdata/cp-test.txt multinode-360800-m03:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile3972739023/001/cp-test_multinode-360800-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m03:/home/docker/cp-test.txt multinode-360800:/home/docker/cp-test_multinode-360800-m03_multinode-360800.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800 "sudo cat /home/docker/cp-test_multinode-360800-m03_multinode-360800.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 cp multinode-360800-m03:/home/docker/cp-test.txt multinode-360800-m02:/home/docker/cp-test_multinode-360800-m03_multinode-360800-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 ssh -n multinode-360800-m02 "sudo cat /home/docker/cp-test_multinode-360800-m03_multinode-360800-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (10.93s)

TestMultiNode/serial/StopNode (2.26s)

=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:248: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 node stop m03
multinode_test.go:248: (dbg) Done: out/minikube-linux-arm64 -p multinode-360800 node stop m03: (1.210966149s)
multinode_test.go:254: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status
multinode_test.go:254: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-360800 status: exit status 7 (516.653716ms)

-- stdout --
	multinode-360800
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-360800-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-360800-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

-- /stdout --
multinode_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
multinode_test.go:261: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr: exit status 7 (531.872582ms)

-- stdout --
	multinode-360800
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-360800-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-360800-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

-- /stdout --
** stderr ** 
	I0917 01:12:35.348135  756696 out.go:360] Setting OutFile to fd 1 ...
	I0917 01:12:35.348320  756696 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:12:35.348350  756696 out.go:374] Setting ErrFile to fd 2...
	I0917 01:12:35.348369  756696 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:12:35.348688  756696 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 01:12:35.348914  756696 out.go:368] Setting JSON to false
	I0917 01:12:35.348980  756696 mustload.go:65] Loading cluster: multinode-360800
	I0917 01:12:35.349036  756696 notify.go:220] Checking for updates...
	I0917 01:12:35.349468  756696 config.go:182] Loaded profile config "multinode-360800": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 01:12:35.349511  756696 status.go:174] checking status of multinode-360800 ...
	I0917 01:12:35.350326  756696 cli_runner.go:164] Run: docker container inspect multinode-360800 --format={{.State.Status}}
	I0917 01:12:35.369215  756696 status.go:371] multinode-360800 host status = "Running" (err=<nil>)
	I0917 01:12:35.369241  756696 host.go:66] Checking if "multinode-360800" exists ...
	I0917 01:12:35.369567  756696 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-360800
	I0917 01:12:35.402717  756696 host.go:66] Checking if "multinode-360800" exists ...
	I0917 01:12:35.403023  756696 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 01:12:35.403078  756696 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-360800
	I0917 01:12:35.421864  756696 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33646 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/multinode-360800/id_rsa Username:docker}
	I0917 01:12:35.517631  756696 ssh_runner.go:195] Run: systemctl --version
	I0917 01:12:35.521630  756696 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 01:12:35.533435  756696 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0917 01:12:35.593172  756696 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:50 OomKillDisable:true NGoroutines:62 SystemTime:2025-09-17 01:12:35.582855999 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx P
ath:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
	I0917 01:12:35.593963  756696 kubeconfig.go:125] found "multinode-360800" server: "https://192.168.67.2:8443"
	I0917 01:12:35.594010  756696 api_server.go:166] Checking apiserver status ...
	I0917 01:12:35.594067  756696 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0917 01:12:35.606046  756696 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2263/cgroup
	I0917 01:12:35.616150  756696 api_server.go:182] apiserver freezer: "3:freezer:/docker/334aed85320149d54644a930bec06f6cc46e5afbc150c4c1c38baadd2b25fcef/kubepods/burstable/pod7382739ca9f5bfe7542b59e870385180/c4b8df3bf05fef65387947b54ebc3166d910b2c23a428b4c3fdeab725d8c3153"
	I0917 01:12:35.616230  756696 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/334aed85320149d54644a930bec06f6cc46e5afbc150c4c1c38baadd2b25fcef/kubepods/burstable/pod7382739ca9f5bfe7542b59e870385180/c4b8df3bf05fef65387947b54ebc3166d910b2c23a428b4c3fdeab725d8c3153/freezer.state
	I0917 01:12:35.625337  756696 api_server.go:204] freezer state: "THAWED"
	I0917 01:12:35.625366  756696 api_server.go:253] Checking apiserver healthz at https://192.168.67.2:8443/healthz ...
	I0917 01:12:35.634055  756696 api_server.go:279] https://192.168.67.2:8443/healthz returned 200:
	ok
	I0917 01:12:35.634092  756696 status.go:463] multinode-360800 apiserver status = Running (err=<nil>)
	I0917 01:12:35.634104  756696 status.go:176] multinode-360800 status: &{Name:multinode-360800 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 01:12:35.634121  756696 status.go:174] checking status of multinode-360800-m02 ...
	I0917 01:12:35.634436  756696 cli_runner.go:164] Run: docker container inspect multinode-360800-m02 --format={{.State.Status}}
	I0917 01:12:35.658073  756696 status.go:371] multinode-360800-m02 host status = "Running" (err=<nil>)
	I0917 01:12:35.658097  756696 host.go:66] Checking if "multinode-360800-m02" exists ...
	I0917 01:12:35.658398  756696 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-360800-m02
	I0917 01:12:35.675522  756696 host.go:66] Checking if "multinode-360800-m02" exists ...
	I0917 01:12:35.675825  756696 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0917 01:12:35.675870  756696 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-360800-m02
	I0917 01:12:35.693394  756696 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33651 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/multinode-360800-m02/id_rsa Username:docker}
	I0917 01:12:35.793512  756696 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0917 01:12:35.805873  756696 status.go:176] multinode-360800-m02 status: &{Name:multinode-360800-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I0917 01:12:35.805907  756696 status.go:174] checking status of multinode-360800-m03 ...
	I0917 01:12:35.806228  756696 cli_runner.go:164] Run: docker container inspect multinode-360800-m03 --format={{.State.Status}}
	I0917 01:12:35.822920  756696 status.go:371] multinode-360800-m03 host status = "Stopped" (err=<nil>)
	I0917 01:12:35.822945  756696 status.go:384] host is not running, skipping remaining checks
	I0917 01:12:35.822953  756696 status.go:176] multinode-360800-m03 status: &{Name:multinode-360800-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.26s)
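
Note: the stderr trace above shows how status arrives at its report: it inspects the container state, and for the running control plane it locates the kube-apiserver process, reads its freezer cgroup state, and finally probes https://192.168.67.2:8443/healthz, while the stopped m03 host short-circuits the remaining checks. A minimal sketch of just such a healthz probe is below; the URL is the one from the trace, and skipping TLS verification is an illustrative shortcut, not necessarily what minikube itself does.

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// The apiserver presents a cluster-internal certificate, so this
		// quick probe skips verification (illustrative shortcut only).
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.67.2:8443/healthz")
	if err != nil {
		fmt.Println("apiserver unreachable:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("healthz returned %d: %s\n", resp.StatusCode, body)
}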

TestMultiNode/serial/StartAfterStop (9.3s)

=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:282: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 node start m03 -v=5 --alsologtostderr
E0917 01:12:40.315470  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:282: (dbg) Done: out/minikube-linux-arm64 -p multinode-360800 node start m03 -v=5 --alsologtostderr: (8.470105935s)
multinode_test.go:290: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status -v=5 --alsologtostderr
multinode_test.go:306: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (9.30s)

TestMultiNode/serial/RestartKeepsNodes (75.93s)

=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:314: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-360800
multinode_test.go:321: (dbg) Run:  out/minikube-linux-arm64 stop -p multinode-360800
multinode_test.go:321: (dbg) Done: out/minikube-linux-arm64 stop -p multinode-360800: (22.632022651s)
multinode_test.go:326: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-360800 --wait=true -v=5 --alsologtostderr
E0917 01:13:52.991877  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:326: (dbg) Done: out/minikube-linux-arm64 start -p multinode-360800 --wait=true -v=5 --alsologtostderr: (53.106889595s)
multinode_test.go:331: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-360800
--- PASS: TestMultiNode/serial/RestartKeepsNodes (75.93s)

TestMultiNode/serial/DeleteNode (5.75s)

=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:416: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 node delete m03
multinode_test.go:416: (dbg) Done: out/minikube-linux-arm64 -p multinode-360800 node delete m03: (5.063023332s)
multinode_test.go:422: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
multinode_test.go:436: (dbg) Run:  kubectl get nodes
multinode_test.go:444: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (5.75s)

TestMultiNode/serial/StopMultiNode (21.61s)

=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:345: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 stop
multinode_test.go:345: (dbg) Done: out/minikube-linux-arm64 -p multinode-360800 stop: (21.435674225s)
multinode_test.go:351: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status
multinode_test.go:351: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-360800 status: exit status 7 (85.827221ms)

-- stdout --
	multinode-360800
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-360800-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

-- /stdout --
multinode_test.go:358: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
multinode_test.go:358: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr: exit status 7 (84.180806ms)

-- stdout --
	multinode-360800
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-360800-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

-- /stdout --
** stderr ** 
	I0917 01:14:28.373603  770022 out.go:360] Setting OutFile to fd 1 ...
	I0917 01:14:28.373713  770022 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:14:28.373769  770022 out.go:374] Setting ErrFile to fd 2...
	I0917 01:14:28.373774  770022 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I0917 01:14:28.374020  770022 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
	I0917 01:14:28.374205  770022 out.go:368] Setting JSON to false
	I0917 01:14:28.374232  770022 mustload.go:65] Loading cluster: multinode-360800
	I0917 01:14:28.374288  770022 notify.go:220] Checking for updates...
	I0917 01:14:28.374618  770022 config.go:182] Loaded profile config "multinode-360800": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
	I0917 01:14:28.374638  770022 status.go:174] checking status of multinode-360800 ...
	I0917 01:14:28.375143  770022 cli_runner.go:164] Run: docker container inspect multinode-360800 --format={{.State.Status}}
	I0917 01:14:28.393958  770022 status.go:371] multinode-360800 host status = "Stopped" (err=<nil>)
	I0917 01:14:28.393979  770022 status.go:384] host is not running, skipping remaining checks
	I0917 01:14:28.393985  770022 status.go:176] multinode-360800 status: &{Name:multinode-360800 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0917 01:14:28.394010  770022 status.go:174] checking status of multinode-360800-m02 ...
	I0917 01:14:28.394306  770022 cli_runner.go:164] Run: docker container inspect multinode-360800-m02 --format={{.State.Status}}
	I0917 01:14:28.410609  770022 status.go:371] multinode-360800-m02 host status = "Stopped" (err=<nil>)
	I0917 01:14:28.410629  770022 status.go:384] host is not running, skipping remaining checks
	I0917 01:14:28.410636  770022 status.go:176] multinode-360800-m02 status: &{Name:multinode-360800-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (21.61s)
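Note: as the output above shows, the status command exits with code 7 while the cluster is stopped (flagged elsewhere in this report as "may be ok"), so a scripted check has to tolerate the non-zero exit; a minimal sketch:

	# capture the per-node status without letting the expected exit code 7 abort the script
	out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr || true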

                                                
                                    
TestMultiNode/serial/RestartMultiNode (52.54s)

=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:376: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-360800 --wait=true -v=5 --alsologtostderr --driver=docker  --container-runtime=docker
multinode_test.go:376: (dbg) Done: out/minikube-linux-arm64 start -p multinode-360800 --wait=true -v=5 --alsologtostderr --driver=docker  --container-runtime=docker: (51.851067728s)
multinode_test.go:382: (dbg) Run:  out/minikube-linux-arm64 -p multinode-360800 status --alsologtostderr
multinode_test.go:396: (dbg) Run:  kubectl get nodes
multinode_test.go:404: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (52.54s)

                                                
                                    
TestMultiNode/serial/ValidateNameConflict (38.89s)

=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:455: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-360800
multinode_test.go:464: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-360800-m02 --driver=docker  --container-runtime=docker
multinode_test.go:464: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p multinode-360800-m02 --driver=docker  --container-runtime=docker: exit status 14 (95.026712ms)

                                                
                                                
-- stdout --
	* [multinode-360800-m02] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=21550
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	! Profile name 'multinode-360800-m02' is duplicated with machine name 'multinode-360800-m02' in profile 'multinode-360800'
	X Exiting due to MK_USAGE: Profile name should be unique

                                                
                                                
** /stderr **
multinode_test.go:472: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-360800-m03 --driver=docker  --container-runtime=docker
multinode_test.go:472: (dbg) Done: out/minikube-linux-arm64 start -p multinode-360800-m03 --driver=docker  --container-runtime=docker: (36.350517821s)
multinode_test.go:479: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-360800
multinode_test.go:479: (dbg) Non-zero exit: out/minikube-linux-arm64 node add -p multinode-360800: exit status 80 (350.029415ms)

                                                
                                                
-- stdout --
	* Adding node m03 to cluster multinode-360800 as [worker]
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-360800-m03 already exists in multinode-360800-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_0.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
multinode_test.go:484: (dbg) Run:  out/minikube-linux-arm64 delete -p multinode-360800-m03
multinode_test.go:484: (dbg) Done: out/minikube-linux-arm64 delete -p multinode-360800-m03: (2.030702483s)
--- PASS: TestMultiNode/serial/ValidateNameConflict (38.89s)
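Note: the name-conflict behaviour verified above can be reproduced directly; a minimal sketch, assuming the multinode-360800 profile from the earlier steps still exists:

	# rejected with exit status 14 (MK_USAGE): the profile name collides with the m02 machine of profile multinode-360800
	out/minikube-linux-arm64 start -p multinode-360800-m02 --driver=docker --container-runtime=docker
	# accepted: no collision with an existing machine name
	out/minikube-linux-arm64 start -p multinode-360800-m03 --driver=docker --container-runtime=docker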

                                                
                                    
TestPreload (156.91s)

=== RUN   TestPreload
preload_test.go:43: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-369828 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.32.0
E0917 01:16:17.252635  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
preload_test.go:43: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-369828 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.32.0: (1m22.944403631s)
preload_test.go:51: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-369828 image pull gcr.io/k8s-minikube/busybox
preload_test.go:51: (dbg) Done: out/minikube-linux-arm64 -p test-preload-369828 image pull gcr.io/k8s-minikube/busybox: (2.378227641s)
preload_test.go:57: (dbg) Run:  out/minikube-linux-arm64 stop -p test-preload-369828
preload_test.go:57: (dbg) Done: out/minikube-linux-arm64 stop -p test-preload-369828: (5.696033237s)
preload_test.go:65: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-369828 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker
preload_test.go:65: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-369828 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker: (1m3.465069411s)
preload_test.go:70: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-369828 image list
helpers_test.go:175: Cleaning up "test-preload-369828" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p test-preload-369828
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p test-preload-369828: (2.204462097s)
--- PASS: TestPreload (156.91s)

                                                
                                    
TestScheduledStopUnix (107.05s)

=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-arm64 start -p scheduled-stop-655470 --memory=3072 --driver=docker  --container-runtime=docker
E0917 01:18:52.991848  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-655470 --memory=3072 --driver=docker  --container-runtime=docker: (33.895152092s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-655470 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run:  out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-655470 -n scheduled-stop-655470
scheduled_stop_test.go:169: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-655470 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
I0917 01:19:15.239420  578284 retry.go:31] will retry after 102.863µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.240608  578284 retry.go:31] will retry after 117.575µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.241744  578284 retry.go:31] will retry after 193.135µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.242830  578284 retry.go:31] will retry after 241.48µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.243958  578284 retry.go:31] will retry after 548.094µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.245045  578284 retry.go:31] will retry after 416.564µs: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.246165  578284 retry.go:31] will retry after 1.391008ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.248376  578284 retry.go:31] will retry after 1.13151ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.250596  578284 retry.go:31] will retry after 2.168438ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.253802  578284 retry.go:31] will retry after 3.915021ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.257986  578284 retry.go:31] will retry after 5.235952ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.264228  578284 retry.go:31] will retry after 7.517589ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.272423  578284 retry.go:31] will retry after 7.644787ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.280675  578284 retry.go:31] will retry after 18.456329ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
I0917 01:19:15.299936  578284 retry.go:31] will retry after 40.568284ms: open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/scheduled-stop-655470/pid: no such file or directory
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-655470 --cancel-scheduled
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-655470 -n scheduled-stop-655470
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-655470
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-655470 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-655470
scheduled_stop_test.go:205: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p scheduled-stop-655470: exit status 7 (70.921074ms)

                                                
                                                
-- stdout --
	scheduled-stop-655470
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-655470 -n scheduled-stop-655470
scheduled_stop_test.go:176: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-655470 -n scheduled-stop-655470: exit status 7 (67.670278ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-655470" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p scheduled-stop-655470
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-655470: (1.653653143s)
--- PASS: TestScheduledStopUnix (107.05s)
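Note: the scheduled-stop flow exercised above boils down to three commands; a minimal sketch, assuming the scheduled-stop-655470 profile is running:

	out/minikube-linux-arm64 stop -p scheduled-stop-655470 --schedule 5m                                        # queue a stop five minutes out
	out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-655470 -n scheduled-stop-655470  # inspect the pending stop
	out/minikube-linux-arm64 stop -p scheduled-stop-655470 --cancel-scheduled                                   # cancel it again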

                                                
                                    
TestSkaffold (137.48s)

=== RUN   TestSkaffold
skaffold_test.go:59: (dbg) Run:  /tmp/skaffold.exe2091944127 version
skaffold_test.go:63: skaffold version: v2.16.1
skaffold_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p skaffold-735592 --memory=3072 --driver=docker  --container-runtime=docker
skaffold_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p skaffold-735592 --memory=3072 --driver=docker  --container-runtime=docker: (31.719426952s)
skaffold_test.go:86: copying out/minikube-linux-arm64 to /home/jenkins/workspace/Docker_Linux_docker_arm64/out/minikube
skaffold_test.go:105: (dbg) Run:  /tmp/skaffold.exe2091944127 run --minikube-profile skaffold-735592 --kube-context skaffold-735592 --status-check=true --port-forward=false --interactive=false
E0917 01:21:17.252281  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
skaffold_test.go:105: (dbg) Done: /tmp/skaffold.exe2091944127 run --minikube-profile skaffold-735592 --kube-context skaffold-735592 --status-check=true --port-forward=false --interactive=false: (1m29.704647719s)
skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ...
helpers_test.go:352: "leeroy-app-6fb5b677d7-wqngp" [b8c0a39a-091b-454e-8b8a-31b2fe53eec7] Running
skaffold_test.go:111: (dbg) TestSkaffold: app=leeroy-app healthy within 6.003229777s
skaffold_test.go:114: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ...
helpers_test.go:352: "leeroy-web-57dd9b68d9-5v6bw" [900a6069-e19d-48f4-8894-79e2d78544cc] Running
skaffold_test.go:114: (dbg) TestSkaffold: app=leeroy-web healthy within 5.004145225s
helpers_test.go:175: Cleaning up "skaffold-735592" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p skaffold-735592
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p skaffold-735592: (3.049324435s)
--- PASS: TestSkaffold (137.48s)

                                                
                                    
TestInsufficientStorage (10.92s)

=== RUN   TestInsufficientStorage
status_test.go:50: (dbg) Run:  out/minikube-linux-arm64 start -p insufficient-storage-896027 --memory=3072 --output=json --wait=true --driver=docker  --container-runtime=docker
status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p insufficient-storage-896027 --memory=3072 --output=json --wait=true --driver=docker  --container-runtime=docker: exit status 26 (8.583821937s)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"e4a58260-c6c9-4f20-8de1-f7985355f8c5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-896027] minikube v1.37.0 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"e3b45e6d-423c-4e4e-a974-6b7531b4e41e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=21550"}}
	{"specversion":"1.0","id":"349480bb-2b27-4c37-807c-e0def60fd5b5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"807d1751-98d3-4062-b8b4-0bae8d7bfa93","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig"}}
	{"specversion":"1.0","id":"709bd3dc-6a63-4313-b12c-10e7febd4538","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube"}}
	{"specversion":"1.0","id":"953145d9-1d71-46f3-9d2a-a91fa96e75c6","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"44d68637-d1ae-4cb6-9625-7bc084e52891","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"67329e30-e311-4ed4-947c-3fd44a0ea030","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}}
	{"specversion":"1.0","id":"f29d1532-e3c0-4e8a-ac24-b4efa926ec1a","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}}
	{"specversion":"1.0","id":"4169b3fc-611f-40ef-b4d4-86cce128e4c1","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}}
	{"specversion":"1.0","id":"7d6ca73f-702f-4806-8241-a3373038ae6b","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}}
	{"specversion":"1.0","id":"1550e94f-9caa-4a6a-a14c-0db01c49a3e6","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting \"insufficient-storage-896027\" primary control-plane node in \"insufficient-storage-896027\" cluster","name":"Starting Node","totalsteps":"19"}}
	{"specversion":"1.0","id":"fe33b82c-888f-4b06-b427-9accf1b2052f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image v0.0.48 ...","name":"Pulling Base Image","totalsteps":"19"}}
	{"specversion":"1.0","id":"f96b8c96-853b-4e27-b32b-5a6d2dc4a4b3","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=3072MB) ...","name":"Creating Container","totalsteps":"19"}}
	{"specversion":"1.0","id":"940be0d9-4b3a-4ac2-af72-d2b02ca8a9f9","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}}

                                                
                                                
-- /stdout --
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-896027 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-896027 --output=json --layout=cluster: exit status 7 (304.187214ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-896027","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=3072MB) ...","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-896027","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0917 01:22:54.271523  804155 status.go:458] kubeconfig endpoint: get endpoint: "insufficient-storage-896027" does not appear in /home/jenkins/minikube-integration/21550-576428/kubeconfig

                                                
                                                
** /stderr **
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-896027 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-896027 --output=json --layout=cluster: exit status 7 (317.295303ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-896027","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-896027","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0917 01:22:54.587372  804218 status.go:458] kubeconfig endpoint: get endpoint: "insufficient-storage-896027" does not appear in /home/jenkins/minikube-integration/21550-576428/kubeconfig
	E0917 01:22:54.597620  804218 status.go:258] unable to read event log: stat: stat /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/insufficient-storage-896027/events.json: no such file or directory

                                                
                                                
** /stderr **
helpers_test.go:175: Cleaning up "insufficient-storage-896027" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p insufficient-storage-896027
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p insufficient-storage-896027: (1.710199161s)
--- PASS: TestInsufficientStorage (10.92s)
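Note: exit code 26 (RSRC_DOCKER_STORAGE) above comes with its own remediation advice; a minimal sketch of the suggested follow-up, assuming the Docker driver and container runtime:

	docker system prune                                                      # free unused Docker data on the host (optionally with -a)
	minikube ssh -- docker system prune                                      # prune inside the node when using the Docker container runtime
	out/minikube-linux-arm64 start -p insufficient-storage-896027 --force    # or, per the message above, skip the capacity check entirely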

                                                
                                    
TestRunningBinaryUpgrade (74.27s)

=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:120: (dbg) Run:  /tmp/minikube-v1.32.0.284777443 start -p running-upgrade-510806 --memory=3072 --vm-driver=docker  --container-runtime=docker
version_upgrade_test.go:120: (dbg) Done: /tmp/minikube-v1.32.0.284777443 start -p running-upgrade-510806 --memory=3072 --vm-driver=docker  --container-runtime=docker: (43.951958041s)
version_upgrade_test.go:130: (dbg) Run:  out/minikube-linux-arm64 start -p running-upgrade-510806 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:130: (dbg) Done: out/minikube-linux-arm64 start -p running-upgrade-510806 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (27.498032044s)
helpers_test.go:175: Cleaning up "running-upgrade-510806" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p running-upgrade-510806
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p running-upgrade-510806: (2.045882519s)
--- PASS: TestRunningBinaryUpgrade (74.27s)

                                                
                                    
TestKubernetesUpgrade (378.93s)

=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:222: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0917 01:29:20.316777  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:222: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (42.755426157s)
version_upgrade_test.go:227: (dbg) Run:  out/minikube-linux-arm64 stop -p kubernetes-upgrade-981235
version_upgrade_test.go:227: (dbg) Done: out/minikube-linux-arm64 stop -p kubernetes-upgrade-981235: (1.902301238s)
version_upgrade_test.go:232: (dbg) Run:  out/minikube-linux-arm64 -p kubernetes-upgrade-981235 status --format={{.Host}}
version_upgrade_test.go:232: (dbg) Non-zero exit: out/minikube-linux-arm64 -p kubernetes-upgrade-981235 status --format={{.Host}}: exit status 7 (67.235551ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
version_upgrade_test.go:234: status error: exit status 7 (may be ok)
version_upgrade_test.go:243: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.34.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0917 01:30:15.172778  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:243: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.34.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (4m40.688186404s)
version_upgrade_test.go:248: (dbg) Run:  kubectl --context kubernetes-upgrade-981235 version --output=json
version_upgrade_test.go:267: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:269: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker  --container-runtime=docker
version_upgrade_test.go:269: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker  --container-runtime=docker: exit status 106 (90.153184ms)

                                                
                                                
-- stdout --
	* [kubernetes-upgrade-981235] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=21550
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.34.0 cluster to v1.28.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.28.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-981235
	    minikube start -p kubernetes-upgrade-981235 --kubernetes-version=v1.28.0
	    
	    2) Create a second cluster with Kubernetes 1.28.0, by running:
	    
	    minikube start -p kubernetes-upgrade-9812352 --kubernetes-version=v1.28.0
	    
	    3) Use the existing cluster at version Kubernetes 1.34.0, by running:
	    
	    minikube start -p kubernetes-upgrade-981235 --kubernetes-version=v1.34.0
	    

                                                
                                                
** /stderr **
version_upgrade_test.go:273: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:275: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.34.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:275: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-981235 --memory=3072 --kubernetes-version=v1.34.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (50.085984152s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-981235" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p kubernetes-upgrade-981235
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p kubernetes-upgrade-981235: (3.244210168s)
--- PASS: TestKubernetesUpgrade (378.93s)

                                                
                                    
TestMissingContainerUpgrade (91.34s)

=== RUN   TestMissingContainerUpgrade
=== PAUSE TestMissingContainerUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestMissingContainerUpgrade
version_upgrade_test.go:309: (dbg) Run:  /tmp/minikube-v1.32.0.4107143491 start -p missing-upgrade-359395 --memory=3072 --driver=docker  --container-runtime=docker
E0917 01:27:36.443917  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:41.565380  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:51.807266  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:309: (dbg) Done: /tmp/minikube-v1.32.0.4107143491 start -p missing-upgrade-359395 --memory=3072 --driver=docker  --container-runtime=docker: (33.241356741s)
version_upgrade_test.go:318: (dbg) Run:  docker stop missing-upgrade-359395
E0917 01:28:12.289125  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:318: (dbg) Done: docker stop missing-upgrade-359395: (10.424211412s)
version_upgrade_test.go:323: (dbg) Run:  docker rm missing-upgrade-359395
version_upgrade_test.go:329: (dbg) Run:  out/minikube-linux-arm64 start -p missing-upgrade-359395 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:329: (dbg) Done: out/minikube-linux-arm64 start -p missing-upgrade-359395 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (44.588282652s)
helpers_test.go:175: Cleaning up "missing-upgrade-359395" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p missing-upgrade-359395
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p missing-upgrade-359395: (2.125753001s)
--- PASS: TestMissingContainerUpgrade (91.34s)

                                                
                                    
TestNoKubernetes/serial/StartNoK8sWithVersion (0.11s)

=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:85: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --kubernetes-version=v1.28.0 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:85: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --kubernetes-version=v1.28.0 --driver=docker  --container-runtime=docker: exit status 14 (111.417922ms)

                                                
                                                
-- stdout --
	* [NoKubernetes-037677] minikube v1.37.0 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=21550
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.11s)
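Note: the usage error above means --kubernetes-version and --no-kubernetes are mutually exclusive; either drop the version flag or, if the version comes from global config, unset it as the message suggests. A minimal sketch:

	minikube config unset kubernetes-version    # drop a globally pinned kubernetes-version, per the suggestion above
	out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --memory=3072 --driver=docker --container-runtime=docker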

                                                
                                    
TestNoKubernetes/serial/StartWithK8s (42.53s)

=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:97: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-037677 --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
E0917 01:23:36.068877  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
no_kubernetes_test.go:97: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-037677 --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (42.095991778s)
no_kubernetes_test.go:202: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-037677 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (42.53s)

                                                
                                    
TestNoKubernetes/serial/StartWithStopK8s (20.44s)

=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:114: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:114: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (17.991901417s)
no_kubernetes_test.go:202: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-037677 status -o json
no_kubernetes_test.go:202: (dbg) Non-zero exit: out/minikube-linux-arm64 -p NoKubernetes-037677 status -o json: exit status 2 (365.939769ms)

                                                
                                                
-- stdout --
	{"Name":"NoKubernetes-037677","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}

                                                
                                                
-- /stdout --
no_kubernetes_test.go:126: (dbg) Run:  out/minikube-linux-arm64 delete -p NoKubernetes-037677
no_kubernetes_test.go:126: (dbg) Done: out/minikube-linux-arm64 delete -p NoKubernetes-037677: (2.082393359s)
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (20.44s)

                                                
                                    
TestNoKubernetes/serial/Start (9.05s)

=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:138: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:138: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-037677 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (9.052125066s)
--- PASS: TestNoKubernetes/serial/Start (9.05s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunning (0.32s)

=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:149: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-037677 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:149: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-037677 "sudo systemctl is-active --quiet service kubelet": exit status 1 (320.893585ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.32s)

                                                
                                    
TestNoKubernetes/serial/ProfileList (1.23s)

=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:171: (dbg) Run:  out/minikube-linux-arm64 profile list
no_kubernetes_test.go:181: (dbg) Run:  out/minikube-linux-arm64 profile list --output=json
--- PASS: TestNoKubernetes/serial/ProfileList (1.23s)

                                                
                                    
TestNoKubernetes/serial/Stop (1.26s)

=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:160: (dbg) Run:  out/minikube-linux-arm64 stop -p NoKubernetes-037677
no_kubernetes_test.go:160: (dbg) Done: out/minikube-linux-arm64 stop -p NoKubernetes-037677: (1.258219219s)
--- PASS: TestNoKubernetes/serial/Stop (1.26s)

                                                
                                    
TestNoKubernetes/serial/StartNoArgs (8.42s)

=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:193: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-037677 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:193: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-037677 --driver=docker  --container-runtime=docker: (8.423581101s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (8.42s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.39s)

=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:149: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-037677 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:149: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-037677 "sudo systemctl is-active --quiet service kubelet": exit status 1 (389.967434ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.39s)

                                                
                                    
TestStoppedBinaryUpgrade/Setup (0.75s)

=== RUN   TestStoppedBinaryUpgrade/Setup
--- PASS: TestStoppedBinaryUpgrade/Setup (0.75s)

                                                
                                    
TestStoppedBinaryUpgrade/Upgrade (88.58s)

=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:183: (dbg) Run:  /tmp/minikube-v1.32.0.196994398 start -p stopped-upgrade-698729 --memory=3072 --vm-driver=docker  --container-runtime=docker
E0917 01:26:17.252574  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:183: (dbg) Done: /tmp/minikube-v1.32.0.196994398 start -p stopped-upgrade-698729 --memory=3072 --vm-driver=docker  --container-runtime=docker: (55.530609198s)
version_upgrade_test.go:192: (dbg) Run:  /tmp/minikube-v1.32.0.196994398 -p stopped-upgrade-698729 stop
version_upgrade_test.go:192: (dbg) Done: /tmp/minikube-v1.32.0.196994398 -p stopped-upgrade-698729 stop: (10.862703114s)
version_upgrade_test.go:198: (dbg) Run:  out/minikube-linux-arm64 start -p stopped-upgrade-698729 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:198: (dbg) Done: out/minikube-linux-arm64 start -p stopped-upgrade-698729 --memory=3072 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (22.184834259s)
--- PASS: TestStoppedBinaryUpgrade/Upgrade (88.58s)

                                                
                                    
TestStoppedBinaryUpgrade/MinikubeLogs (1.09s)

=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:206: (dbg) Run:  out/minikube-linux-arm64 logs -p stopped-upgrade-698729
E0917 01:27:31.312743  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.319079  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.330429  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.351789  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.393886  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.475122  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.636754  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:27:31.958561  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:206: (dbg) Done: out/minikube-linux-arm64 logs -p stopped-upgrade-698729: (1.085813461s)
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (1.09s)

                                                
                                    
TestPause/serial/Start (78.05s)

=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-arm64 start -p pause-683948 --memory=3072 --install-addons=false --wait=all --driver=docker  --container-runtime=docker
E0917 01:31:17.252776  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
pause_test.go:80: (dbg) Done: out/minikube-linux-arm64 start -p pause-683948 --memory=3072 --install-addons=false --wait=all --driver=docker  --container-runtime=docker: (1m18.050394617s)
--- PASS: TestPause/serial/Start (78.05s)

                                                
                                    
TestPause/serial/SecondStartNoReconfiguration (58.18s)

=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-arm64 start -p pause-683948 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0917 01:32:31.312538  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:32:59.014356  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
pause_test.go:92: (dbg) Done: out/minikube-linux-arm64 start -p pause-683948 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (58.142619631s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (58.18s)

                                                
                                    
TestPause/serial/Pause (0.82s)

=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-683948 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.82s)

                                                
                                    
TestPause/serial/VerifyStatus (0.43s)

=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p pause-683948 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p pause-683948 --output=json --layout=cluster: exit status 2 (433.896384ms)

                                                
                                                
-- stdout --
	{"Name":"pause-683948","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 12 containers in: kube-system, kubernetes-dashboard, storage-gluster, istio-operator","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-683948","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.43s)
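Note: while a cluster is paused, the status command reports StatusCode 418 ("Paused") and exits with code 2, as seen above; a minimal sketch for checking it from a script:

	# expected to exit 2 while pause-683948 is paused; the JSON still carries the per-component status
	out/minikube-linux-arm64 status -p pause-683948 --output=json --layout=cluster || true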

                                                
                                    
TestPause/serial/Unpause (0.64s)

=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-arm64 unpause -p pause-683948 --alsologtostderr -v=5
--- PASS: TestPause/serial/Unpause (0.64s)

                                                
                                    
TestPause/serial/PauseAgain (1.05s)

=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-683948 --alsologtostderr -v=5
pause_test.go:110: (dbg) Done: out/minikube-linux-arm64 pause -p pause-683948 --alsologtostderr -v=5: (1.050195287s)
--- PASS: TestPause/serial/PauseAgain (1.05s)

                                                
                                    
TestPause/serial/DeletePaused (2.18s)

=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p pause-683948 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p pause-683948 --alsologtostderr -v=5: (2.178203001s)
--- PASS: TestPause/serial/DeletePaused (2.18s)

                                                
                                    
TestPause/serial/VerifyDeletedResources (14.16s)

=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
pause_test.go:142: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (14.106433169s)
pause_test.go:168: (dbg) Run:  docker ps -a
pause_test.go:173: (dbg) Run:  docker volume inspect pause-683948
pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-683948: exit status 1 (21.985544ms)

                                                
                                                
-- stdout --
	[]

                                                
                                                
-- /stdout --
** stderr ** 
	Error response from daemon: get pause-683948: no such volume

                                                
                                                
** /stderr **
pause_test.go:178: (dbg) Run:  docker network ls
--- PASS: TestPause/serial/VerifyDeletedResources (14.16s)
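Note: the cleanup verification above amounts to checking that no Docker leftovers reference the deleted profile; a minimal sketch, assuming the pause-683948 profile has just been deleted:

	docker ps -a                          # no pause-683948 container should remain
	docker volume inspect pause-683948    # expected to fail with "no such volume"
	docker network ls                     # the profile's network should be gone as well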

                                                
                                    
TestNetworkPlugins/group/auto/Start (74.73s)

=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p auto-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker
E0917 01:33:52.991185  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p auto-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker: (1m14.727091325s)
--- PASS: TestNetworkPlugins/group/auto/Start (74.73s)
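
The Start subtests for every CNI group in this section use the same invocation and differ only in how the network plugin is selected; a sketch of the shared pattern, with flag values copied from the commands logged here (the auto group passes no --cni flag, and the kubenet group swaps --cni for --network-plugin=kubenet):

  out/minikube-linux-arm64 start -p auto-490703 --memory=3072 --alsologtostderr \
    --wait=true --wait-timeout=15m --driver=docker --container-runtime=docker
  # other groups append e.g. --cni=kindnet, --cni=calico, --cni=flannel, --cni=bridge,
  # --cni=false, or --cni=testdata/kube-flannel.yaml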

                                                
                                    
TestNetworkPlugins/group/auto/KubeletFlags (0.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p auto-490703 "pgrep -a kubelet"
I0917 01:34:44.908538  578284 config.go:182] Loaded profile config "auto-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.29s)

                                                
                                    
TestNetworkPlugins/group/auto/NetCatPod (10.43s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-j9544" [767ad28a-bf9b-4ead-9c1f-333b96804199] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-j9544" [767ad28a-bf9b-4ead-9c1f-333b96804199] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 10.004141962s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (10.43s)
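
Each NetCatPod subtest deploys the same probe workload and then waits for its pod to become Ready; the manifest path and label below come straight from the log, while the explicit "kubectl wait" is only an illustrative stand-in for the test's own polling helpers:

  kubectl --context auto-490703 replace --force -f testdata/netcat-deployment.yaml
  kubectl --context auto-490703 wait --for=condition=Ready pod -l app=netcat --timeout=15m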

                                                
                                    
TestNetworkPlugins/group/auto/DNS (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.23s)

                                                
                                    
TestNetworkPlugins/group/auto/Localhost (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.17s)

                                                
                                    
TestNetworkPlugins/group/auto/HairPin (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.21s)
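
The DNS/Localhost/HairPin trio above is repeated for every CNI group and runs three probes from inside the netcat deployment; the commands below are copied from this log (context auto-490703), with the hairpin case being the interesting one because the pod dials its own service name:

  kubectl --context auto-490703 exec deployment/netcat -- nslookup kubernetes.default
  kubectl --context auto-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
  kubectl --context auto-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"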

                                                
                                    
TestNetworkPlugins/group/kindnet/Start (78.03s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kindnet-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kindnet-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker: (1m18.033743447s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (78.03s)

                                                
                                    
TestNetworkPlugins/group/calico/Start (71.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p calico-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker
E0917 01:36:17.252786  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p calico-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker: (1m11.308090201s)
--- PASS: TestNetworkPlugins/group/calico/Start (71.31s)

                                                
                                    
TestNetworkPlugins/group/calico/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:352: "calico-node-nts27" [b10dfcc9-295e-4103-89c5-95b273a6dbce] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 6.003809173s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (6.01s)
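
The ControllerPod checks wait for the CNI's own pod to report healthy before the connectivity probes run; a rough manual equivalent of that wait, using the label and namespace shown in the log (standard kubectl syntax, not the helper the test actually calls):

  kubectl --context calico-490703 -n kube-system get pods -l k8s-app=calico-node
  kubectl --context calico-490703 -n kube-system wait --for=condition=Ready pod -l k8s-app=calico-node --timeout=10m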

                                                
                                    
TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:352: "kindnet-qdvhq" [b9b0643d-f0d9-48ce-aa63-84358ce58a2d] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 6.003682618s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/calico/KubeletFlags (0.38s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p calico-490703 "pgrep -a kubelet"
I0917 01:36:42.230243  578284 config.go:182] Loaded profile config "calico-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.38s)

                                                
                                    
TestNetworkPlugins/group/calico/NetCatPod (11.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-dzzpd" [05aad3c8-647e-4b8f-88e6-7e62d4066ef5] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-dzzpd" [05aad3c8-647e-4b8f-88e6-7e62d4066ef5] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 11.003424558s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (11.31s)

                                                
                                    
TestNetworkPlugins/group/kindnet/KubeletFlags (0.43s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kindnet-490703 "pgrep -a kubelet"
I0917 01:36:45.100393  578284 config.go:182] Loaded profile config "kindnet-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.43s)

                                                
                                    
TestNetworkPlugins/group/kindnet/NetCatPod (11.57s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-5g275" [70ba99e5-6caa-401a-b992-6cec4f0f5298] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-5g275" [70ba99e5-6caa-401a-b992-6cec4f0f5298] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 11.002651797s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (11.57s)

                                                
                                    
TestNetworkPlugins/group/calico/DNS (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.21s)

                                                
                                    
TestNetworkPlugins/group/calico/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/calico/HairPin (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.19s)

                                                
                                    
TestNetworkPlugins/group/kindnet/DNS (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.20s)

                                                
                                    
TestNetworkPlugins/group/kindnet/Localhost (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.19s)

                                                
                                    
TestNetworkPlugins/group/kindnet/HairPin (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.16s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Start (62.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-flannel-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-flannel-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker: (1m2.306975242s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (62.31s)

                                                
                                    
TestNetworkPlugins/group/false/Start (82.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p false-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker
E0917 01:37:31.313064  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p false-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker: (1m22.223328758s)
--- PASS: TestNetworkPlugins/group/false/Start (82.22s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p custom-flannel-490703 "pgrep -a kubelet"
I0917 01:38:23.728597  578284 config.go:182] Loaded profile config "custom-flannel-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.31s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/NetCatPod (10.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-ts4n5" [725c817b-29e7-499e-b01d-e01a052d4546] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-ts4n5" [725c817b-29e7-499e-b01d-e01a052d4546] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 10.005258347s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (10.29s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/DNS (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.20s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/HairPin (0.15s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.15s)

                                                
                                    
TestNetworkPlugins/group/false/KubeletFlags (0.48s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p false-490703 "pgrep -a kubelet"
I0917 01:38:45.807818  578284 config.go:182] Loaded profile config "false-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.48s)

                                                
                                    
TestNetworkPlugins/group/false/NetCatPod (10.38s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context false-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-xr727" [116070bd-9031-4c7d-a05d-acbc6221b244] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-xr727" [116070bd-9031-4c7d-a05d-acbc6221b244] Running
E0917 01:38:52.991200  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 10.003945192s
--- PASS: TestNetworkPlugins/group/false/NetCatPod (10.38s)

                                                
                                    
TestNetworkPlugins/group/false/DNS (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/DNS
net_test.go:175: (dbg) Run:  kubectl --context false-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/false/DNS (0.19s)

                                                
                                    
TestNetworkPlugins/group/false/Localhost (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/Localhost
net_test.go:194: (dbg) Run:  kubectl --context false-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/false/Localhost (0.22s)

                                                
                                    
TestNetworkPlugins/group/false/HairPin (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/HairPin
net_test.go:264: (dbg) Run:  kubectl --context false-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/false/HairPin (0.25s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/Start (83.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p enable-default-cni-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p enable-default-cni-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker: (1m23.250379476s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (83.25s)

                                                
                                    
TestNetworkPlugins/group/flannel/Start (62.32s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p flannel-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker
E0917 01:39:45.310411  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.316891  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.328407  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.349864  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.391323  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.472923  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.635026  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:45.956476  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:46.598122  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:47.879508  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:50.440771  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:39:55.562399  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:40:05.804552  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:40:16.070167  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p flannel-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker: (1m2.316257289s)
--- PASS: TestNetworkPlugins/group/flannel/Start (62.32s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.34s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p enable-default-cni-490703 "pgrep -a kubelet"
I0917 01:40:21.288265  578284 config.go:182] Loaded profile config "enable-default-cni-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.34s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-dx8hn" [b503a591-a130-4036-9308-fc2492dd8628] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-dx8hn" [b503a591-a130-4036-9308-fc2492dd8628] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 10.004399521s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (10.31s)

                                                
                                    
TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:352: "kube-flannel-ds-rkmgw" [3d98501b-5b77-4118-b828-c88db0610d08] Running
E0917 01:40:26.285931  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 6.003301845s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p flannel-490703 "pgrep -a kubelet"
I0917 01:40:29.882806  578284 config.go:182] Loaded profile config "flannel-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.29s)

                                                
                                    
TestNetworkPlugins/group/flannel/NetCatPod (10.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-5mzg6" [28668bc3-4f98-4580-953c-5107f277d860] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-5mzg6" [28668bc3-4f98-4580-953c-5107f277d860] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 10.008721338s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (10.30s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/DNS (0.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.30s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/Localhost (0.26s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.26s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/HairPin (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.20s)

                                                
                                    
TestNetworkPlugins/group/flannel/DNS (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.23s)

                                                
                                    
TestNetworkPlugins/group/flannel/Localhost (0.33s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.33s)

                                                
                                    
TestNetworkPlugins/group/flannel/HairPin (0.31s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.31s)

                                                
                                    
TestNetworkPlugins/group/bridge/Start (52.83s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p bridge-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p bridge-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker: (52.833906986s)
--- PASS: TestNetworkPlugins/group/bridge/Start (52.83s)

                                                
                                    
TestNetworkPlugins/group/kubenet/Start (87.03s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kubenet-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker
E0917 01:41:17.253119  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:35.850355  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:35.857013  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:35.868339  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:35.889671  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:35.931260  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:36.012617  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:36.174238  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:36.496007  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:37.137446  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.419237  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.662607  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.668963  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.680336  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.701679  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.743825  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.825145  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:38.986628  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:39.308552  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:39.950912  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:40.980810  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:41.232529  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:43.794945  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:46.102957  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kubenet-490703 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker: (1m27.027704776s)
--- PASS: TestNetworkPlugins/group/kubenet/Start (87.03s)

                                                
                                    
TestNetworkPlugins/group/bridge/KubeletFlags (0.42s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p bridge-490703 "pgrep -a kubelet"
I0917 01:41:48.638806  578284 config.go:182] Loaded profile config "bridge-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.42s)

                                                
                                    
TestNetworkPlugins/group/bridge/NetCatPod (11.41s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-490703 replace --force -f testdata/netcat-deployment.yaml
E0917 01:41:48.916578  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-r7xrb" [5591d617-829f-4776-86cd-912d6ce7650b] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-r7xrb" [5591d617-829f-4776-86cd-912d6ce7650b] Running
E0917 01:41:56.344204  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:41:59.158586  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 11.015781523s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (11.41s)

                                                
                                    
TestNetworkPlugins/group/bridge/DNS (0.42s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.42s)

                                                
                                    
TestNetworkPlugins/group/bridge/Localhost (0.2s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.20s)

                                                
                                    
TestNetworkPlugins/group/bridge/HairPin (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.17s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/FirstStart (89.36s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-523043 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.28.0
E0917 01:42:29.169354  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:42:31.312821  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-523043 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.28.0: (1m29.357424508s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (89.36s)
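
Unlike the NetworkPlugins profiles, which come up on v1.34.0 without passing a version flag, this group pins an older release via --kubernetes-version; the full command is the one logged above, reproduced here only for readability:

  out/minikube-linux-arm64 start -p old-k8s-version-523043 --memory=3072 --alsologtostderr \
    --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts \
    --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.28.0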

                                                
                                    
TestNetworkPlugins/group/kubenet/KubeletFlags (0.41s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kubenet-490703 "pgrep -a kubelet"
I0917 01:42:35.232177  578284 config.go:182] Loaded profile config "kubenet-490703": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
--- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.41s)

                                                
                                    
TestNetworkPlugins/group/kubenet/NetCatPod (12.35s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kubenet-490703 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-jn6lf" [c5f517a0-8f83-4c68-a685-d2cc6f471bef] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-jn6lf" [c5f517a0-8f83-4c68-a685-d2cc6f471bef] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 12.003965796s
--- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (12.35s)

                                                
                                    
TestNetworkPlugins/group/kubenet/DNS (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kubenet-490703 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kubenet/DNS (0.25s)

                                                
                                    
TestNetworkPlugins/group/kubenet/Localhost (0.26s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kubenet-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kubenet/Localhost (0.26s)

                                                
                                    
TestNetworkPlugins/group/kubenet/HairPin (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kubenet-490703 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kubenet/HairPin (0.24s)
E0917 01:49:11.222928  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:49:13.867923  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"

                                                
                                    
TestStartStop/group/embed-certs/serial/FirstStart (79.21s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-859319 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:43:23.989118  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:23.995891  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.007275  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.028847  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.070232  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.152313  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.313813  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:24.635938  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:25.277330  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:26.559308  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:29.121370  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:34.242973  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:44.484622  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.161943  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.168389  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.179755  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.201640  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.243088  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.324528  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.486027  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:46.807837  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:47.449285  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:48.731460  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-859319 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (1m19.212513317s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (79.21s)
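
Note: the repeated "Loading client cert failed ... no such file or directory" lines interleaved through these tests appear to be client-go cert-rotation noise for kubeconfig entries whose profiles (custom-flannel-490703, false-490703, and so on) were deleted earlier in the run; they are most likely unrelated to the tests passing or failing below. A hypothetical way to read a saved copy of this log without that noise (the file name is assumed):

  # filter out the cert-rotation errors when scanning the raw log (log file name is hypothetical)
  grep -v 'cert_rotation.go' Docker_Linux_docker_arm64_21550.log | less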

TestStartStop/group/old-k8s-version/serial/DeployApp (10.51s)

=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context old-k8s-version-523043 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [3d52b048-7200-4ed0-985d-8c285896e292] Pending
E0917 01:43:51.292866  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [3d52b048-7200-4ed0-985d-8c285896e292] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E0917 01:43:52.991207  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:43:54.376190  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [3d52b048-7200-4ed0-985d-8c285896e292] Running
E0917 01:43:56.414694  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.003528531s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context old-k8s-version-523043 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (10.51s)
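
For reference, the DeployApp check above can be reproduced roughly by hand with plain kubectl. The context name, label selector, timeout and ulimit probe are taken from the log; using kubectl wait instead of the test's own polling helper is an assumption:

  kubectl --context old-k8s-version-523043 create -f testdata/busybox.yaml
  # wait until the pod labelled integration-test=busybox is Ready (the test allows up to 8m)
  kubectl --context old-k8s-version-523043 wait pod -l integration-test=busybox \
    --for=condition=Ready --timeout=8m -n default
  # the test then checks the container's open-file limit
  kubectl --context old-k8s-version-523043 exec busybox -- /bin/sh -c "ulimit -n"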

TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.31s)

=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-523043 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-523043 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.199783341s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context old-k8s-version-523043 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.31s)
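
The addon step above enables metrics-server while the cluster is running and overrides both its image and registry; rerunning it by hand against the same profile would look like this (flags copied from the log):

  out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-523043 \
    --images=MetricsServer=registry.k8s.io/echoserver:1.4 \
    --registries=MetricsServer=fake.domain
  # confirm the deployment picked up the overridden image and registry
  kubectl --context old-k8s-version-523043 describe deploy/metrics-server -n kube-system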

TestStartStop/group/old-k8s-version/serial/Stop (10.82s)

=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-arm64 stop -p old-k8s-version-523043 --alsologtostderr -v=3
E0917 01:44:04.966426  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:44:06.656943  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-arm64 stop -p old-k8s-version-523043 --alsologtostderr -v=3: (10.819753787s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (10.82s)

TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.19s)

=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-523043 -n old-k8s-version-523043
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-523043 -n old-k8s-version-523043: exit status 7 (69.375902ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p old-k8s-version-523043 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.19s)
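
The status probe above renders a single field of minikube's status through a Go template; against a stopped profile it prints "Stopped" and exits non-zero (7 in this run), which the test explicitly tolerates ("may be ok"). A minimal shell sketch of the same sequence, assuming the exit code is only inspected rather than enforced:

  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-523043 -n old-k8s-version-523043
  echo "status exit code: $?"   # 7 was observed here while the profile is stopped
  out/minikube-linux-arm64 addons enable dashboard -p old-k8s-version-523043 \
    --images=MetricsScraper=registry.k8s.io/echoserver:1.4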

TestStartStop/group/old-k8s-version/serial/SecondStart (27.76s)

=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-523043 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.28.0
E0917 01:44:19.712787  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:44:22.523836  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:44:27.138219  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-523043 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.28.0: (27.22897939s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-523043 -n old-k8s-version-523043
--- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (27.76s)

TestStartStop/group/embed-certs/serial/DeployApp (9.42s)

=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context embed-certs-859319 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [52af5aed-ea0e-4659-a804-a1c7a3c9a52d] Pending
helpers_test.go:352: "busybox" [52af5aed-ea0e-4659-a804-a1c7a3c9a52d] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [52af5aed-ea0e-4659-a804-a1c7a3c9a52d] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 9.010551794s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context embed-certs-859319 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (9.42s)

TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.4s)

=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-859319 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-859319 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.29262054s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context embed-certs-859319 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.40s)

TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (12.01s)

=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-6rxh7" [4bd3f23f-af25-411f-b998-07a4a9e8556d] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
E0917 01:44:45.310660  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:44:45.928005  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-6rxh7" [4bd3f23f-af25-411f-b998-07a4a9e8556d] Running
start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 12.004502817s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (12.01s)
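
The wait above is handled by the test helpers; an equivalent manual spot-check of the dashboard pod (label and namespace taken from the log) might be:

  kubectl --context old-k8s-version-523043 get pods -n kubernetes-dashboard \
    -l k8s-app=kubernetes-dashboard -o wide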

TestStartStop/group/embed-certs/serial/Stop (10.99s)

=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-arm64 stop -p embed-certs-859319 --alsologtostderr -v=3
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-arm64 stop -p embed-certs-859319 --alsologtostderr -v=3: (10.993888767s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (10.99s)

TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.2s)

=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-859319 -n embed-certs-859319
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-859319 -n embed-certs-859319: exit status 7 (77.764912ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p embed-certs-859319 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.20s)

TestStartStop/group/embed-certs/serial/SecondStart (59.13s)

=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-859319 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-859319 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (58.647361772s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-859319 -n embed-certs-859319
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (59.13s)

TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.12s)

=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-6rxh7" [4bd3f23f-af25-411f-b998-07a4a9e8556d] Running
start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.003816896s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context old-k8s-version-523043 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.12s)

TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.27s)

=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-523043 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.27s)
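
The image audit above dumps every image loaded in the node as JSON and flags anything outside the expected Kubernetes set (here the busybox test image). Reproducing the listing by hand; the jq field name is an assumption about the JSON shape, not something the test relies on:

  out/minikube-linux-arm64 -p old-k8s-version-523043 image list --format=json | jq -r '.[].repoTags[]'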

TestStartStop/group/old-k8s-version/serial/Pause (4.59s)

=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 pause -p old-k8s-version-523043 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-523043 -n old-k8s-version-523043
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-523043 -n old-k8s-version-523043: exit status 2 (425.098035ms)

-- stdout --
	Paused

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-523043 -n old-k8s-version-523043
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-523043 -n old-k8s-version-523043: exit status 2 (863.620509ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 unpause -p old-k8s-version-523043 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-arm64 unpause -p old-k8s-version-523043 --alsologtostderr -v=1: (1.038925293s)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-523043 -n old-k8s-version-523043
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-523043 -n old-k8s-version-523043
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (4.59s)
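
The pause sequence above alternates pause/unpause with template-based status probes; a condensed manual version, with commands and expected outputs copied from the log, would be:

  out/minikube-linux-arm64 pause -p old-k8s-version-523043 --alsologtostderr -v=1
  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-523043   # "Paused", exit status 2
  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-523043     # "Stopped", exit status 2
  out/minikube-linux-arm64 unpause -p old-k8s-version-523043 --alsologtostderr -v=1
  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-523043   # succeeds again after unpause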

TestStartStop/group/no-preload/serial/FirstStart (89.13s)

=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-297358 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:45:08.099967  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:13.011159  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/auto-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.574845  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.581185  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.593044  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.614386  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.655709  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.737039  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:21.898538  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:22.220015  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:22.861332  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.587436  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.593788  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.605136  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.626445  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.667809  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.749166  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:23.910516  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:24.143859  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:24.231854  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:24.873696  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:26.155125  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:26.706216  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:28.716970  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:31.827552  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:33.839250  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:42.070647  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:45:44.080993  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-297358 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (1m29.131267091s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (89.13s)

TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6s)

=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-z6jgh" [8f585d3c-0985-49dd-8786-d84a8b9d9ac3] Running
start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.002951101s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.00s)

TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.11s)

=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-z6jgh" [8f585d3c-0985-49dd-8786-d84a8b9d9ac3] Running
E0917 01:46:00.318791  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:02.562393  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.003561921s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context embed-certs-859319 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.11s)

TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.3s)

=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-arm64 -p embed-certs-859319 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.30s)

TestStartStop/group/embed-certs/serial/Pause (3.15s)

=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 pause -p embed-certs-859319 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-859319 -n embed-certs-859319
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-859319 -n embed-certs-859319: exit status 2 (325.331896ms)

-- stdout --
	Paused

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-859319 -n embed-certs-859319
E0917 01:46:04.563097  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-859319 -n embed-certs-859319: exit status 2 (408.105024ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 unpause -p embed-certs-859319 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-859319 -n embed-certs-859319
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-859319 -n embed-certs-859319
--- PASS: TestStartStop/group/embed-certs/serial/Pause (3.15s)

TestStartStop/group/default-k8s-diff-port/serial/FirstStart (78.04s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-756482 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:46:17.252641  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/functional-918451/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:30.025721  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-756482 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (1m18.039160089s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (78.04s)
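
This profile starts the API server on the non-default port 8444 (--apiserver-port=8444). A quick, hypothetical way to confirm the generated kubeconfig really points at that port once the start completes:

  kubectl --context default-k8s-diff-port-756482 config view --minify \
    -o jsonpath='{.clusters[0].cluster.server}'   # expected to end in :8444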

TestStartStop/group/no-preload/serial/DeployApp (11.46s)

=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context no-preload-297358 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [6bd2cb38-e547-49d4-8424-a28d32752e8b] Pending
E0917 01:46:35.850133  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [6bd2cb38-e547-49d4-8424-a28d32752e8b] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E0917 01:46:38.662805  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [6bd2cb38-e547-49d4-8424-a28d32752e8b] Running
E0917 01:46:43.523752  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:45.525226  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 11.003257364s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context no-preload-297358 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (11.46s)

TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.56s)

=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p no-preload-297358 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p no-preload-297358 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.401516408s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context no-preload-297358 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.56s)

TestStartStop/group/no-preload/serial/Stop (10.98s)

=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-arm64 stop -p no-preload-297358 --alsologtostderr -v=3
E0917 01:46:48.996786  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.003159  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.014540  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.036526  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.077857  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.159318  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.320790  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:49.642313  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:50.283862  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:51.565190  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:54.126973  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:46:59.248509  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-arm64 stop -p no-preload-297358 --alsologtostderr -v=3: (10.979868459s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (10.98s)

TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.19s)

=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-297358 -n no-preload-297358
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-297358 -n no-preload-297358: exit status 7 (68.311285ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p no-preload-297358 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.19s)

TestStartStop/group/no-preload/serial/SecondStart (54s)

=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-297358 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:47:03.554669  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/calico-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:06.365340  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kindnet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:09.490052  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-297358 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (53.606983182s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-297358 -n no-preload-297358
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (54.00s)

TestStartStop/group/default-k8s-diff-port/serial/DeployApp (10.39s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context default-k8s-diff-port-756482 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [74ace540-3833-4927-ad19-3b79a9d050e6] Pending
helpers_test.go:352: "busybox" [74ace540-3833-4927-ad19-3b79a9d050e6] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E0917 01:47:29.971884  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/bridge-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [74ace540-3833-4927-ad19-3b79a9d050e6] Running
E0917 01:47:31.312906  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/skaffold-735592/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.541181  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.547507  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.559686  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.581130  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.622584  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.704277  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:35.866003  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:36.187695  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:36.829095  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 10.003305608s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context default-k8s-diff-port-756482 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (10.39s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.11s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-756482 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
E0917 01:47:38.110499  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-756482 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.020763108s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context default-k8s-diff-port-756482 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.11s)

TestStartStop/group/default-k8s-diff-port/serial/Stop (11.07s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-arm64 stop -p default-k8s-diff-port-756482 --alsologtostderr -v=3
E0917 01:47:40.672110  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:47:45.793606  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-arm64 stop -p default-k8s-diff-port-756482 --alsologtostderr -v=3: (11.069080964s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (11.07s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482: exit status 7 (72.269755ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p default-k8s-diff-port-756482 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.19s)

TestStartStop/group/default-k8s-diff-port/serial/SecondStart (56.61s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-756482 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-756482 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (56.072965804s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
E0917 01:48:46.161636  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/false-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (56.61s)

TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6s)

=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-khdk2" [ffa26d15-4899-49e8-8745-3d4aef134b27] Running
E0917 01:47:56.035770  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004072348s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.00s)

TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.14s)

=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-khdk2" [ffa26d15-4899-49e8-8745-3d4aef134b27] Running
start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.003938754s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context no-preload-297358 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.14s)

TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.29s)

=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-arm64 -p no-preload-297358 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.29s)

TestStartStop/group/no-preload/serial/Pause (4.31s)

=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 pause -p no-preload-297358 --alsologtostderr -v=1
E0917 01:48:05.446022  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/enable-default-cni-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-297358 -n no-preload-297358
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-297358 -n no-preload-297358: exit status 2 (480.460467ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-297358 -n no-preload-297358
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-297358 -n no-preload-297358: exit status 2 (462.163727ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 unpause -p no-preload-297358 --alsologtostderr -v=1
E0917 01:48:07.447422  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-297358 -n no-preload-297358
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-297358 -n no-preload-297358
--- PASS: TestStartStop/group/no-preload/serial/Pause (4.31s)
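The Pause subtests above all follow the same sequence: pause the profile, expect "status" to exit 2 while reporting APIServer=Paused and Kubelet=Stopped, then unpause and expect clean exits again. The Go sketch below reproduces that sequence with os/exec; the binary path, profile name, and flags are copied from the log, while the helper itself is only illustrative and is not the test's actual implementation.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// run executes the minikube binary used in this report and returns its
// combined output plus the exit code (0 when the command succeeded).
func run(args ...string) (string, int) {
	out, err := exec.Command("out/minikube-linux-arm64", args...).CombinedOutput()
	code := 0
	if ee, ok := err.(*exec.ExitError); ok {
		code = ee.ExitCode()
	} else if err != nil {
		code = -1 // the command could not be started at all
	}
	return strings.TrimSpace(string(out)), code
}

func main() {
	const profile = "no-preload-297358" // profile name taken from the log above

	run("pause", "-p", profile, "--alsologtostderr", "-v=1")

	// While paused, "status" exits 2: APIServer reports Paused, Kubelet reports Stopped.
	api, code := run("status", "--format={{.APIServer}}", "-p", profile, "-n", profile)
	kubelet, _ := run("status", "--format={{.Kubelet}}", "-p", profile, "-n", profile)
	fmt.Printf("paused: apiserver=%s kubelet=%s (exit %d, non-zero expected)\n", api, kubelet, code)

	run("unpause", "-p", profile, "--alsologtostderr", "-v=1")

	// After unpause both status checks exit 0 again, which is what the test asserts.
	api, code = run("status", "--format={{.APIServer}}", "-p", profile, "-n", profile)
	fmt.Printf("unpaused: apiserver=%s (exit %d)\n", api, code)
}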

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/FirstStart (35.78s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-365554 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:48:16.518073  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:23.987956  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-365554 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (35.7807431s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (35.78s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-hspsh" [55a32e7a-4862-4e29-bd97-6b8f1b719f5d] Running
start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.00390614s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/DeployApp (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.37s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-365554 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-365554 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.37490586s)
start_stop_delete_test.go:209: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.37s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/Stop (5.79s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-arm64 stop -p newest-cni-365554 --alsologtostderr -v=3
E0917 01:48:50.727237  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:50.733985  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:50.745545  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:50.767908  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:50.809256  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:50.890682  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:51.052238  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:51.374070  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:51.691847  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/custom-flannel-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:52.015496  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-arm64 stop -p newest-cni-365554 --alsologtostderr -v=3: (5.786824882s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (5.79s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.09s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-hspsh" [55a32e7a-4862-4e29-bd97-6b8f1b719f5d] Running
E0917 01:48:52.991898  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 01:48:53.297692  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.003822855s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context default-k8s-diff-port-756482 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.09s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.2s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-365554 -n newest-cni-365554
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-365554 -n newest-cni-365554: exit status 7 (72.977619ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p newest-cni-365554 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.20s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/SecondStart (19.67s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-365554 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0
E0917 01:48:55.859727  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-365554 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.34.0: (19.230157331s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-365554 -n newest-cni-365554
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (19.67s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.24s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-arm64 -p default-k8s-diff-port-756482 image list --format=json
E0917 01:48:57.480292  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/kubenet-490703/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.24s)

                                                
                                    
x
+
TestStartStop/group/default-k8s-diff-port/serial/Pause (3.55s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 pause -p default-k8s-diff-port-756482 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482: exit status 2 (319.594663ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482: exit status 2 (313.708047ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 unpause -p default-k8s-diff-port-756482 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-756482 -n default-k8s-diff-port-756482
E0917 01:49:00.981685  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/old-k8s-version-523043/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (3.55s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:271: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:282: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.27s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-arm64 -p newest-cni-365554 image list --format=json
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.27s)

                                                
                                    
x
+
TestStartStop/group/newest-cni/serial/Pause (2.86s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 pause -p newest-cni-365554 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-365554 -n newest-cni-365554
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-365554 -n newest-cni-365554: exit status 2 (319.136764ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-365554 -n newest-cni-365554
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-365554 -n newest-cni-365554: exit status 2 (308.290646ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 unpause -p newest-cni-365554 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-365554 -n newest-cni-365554
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-365554 -n newest-cni-365554
--- PASS: TestStartStop/group/newest-cni/serial/Pause (2.86s)

                                                
                                    

Test skip (26/347)

x
+
TestDownloadOnly/v1.28.0/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.28.0/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.0/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.28.0/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.28.0/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.28.0/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.34.0/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.34.0/cached-images (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.34.0/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.34.0/binaries (0.00s)

                                                
                                    
x
+
TestDownloadOnly/v1.34.0/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.0/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.34.0/kubectl (0.00s)

                                                
                                    
x
+
TestDownloadOnlyKic (0.56s)

                                                
                                                
=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:232: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p download-docker-416078 --alsologtostderr --driver=docker  --container-runtime=docker
aaa_download_only_test.go:244: Skip for arm64 platform. See https://github.com/kubernetes/minikube/issues/10144
helpers_test.go:175: Cleaning up "download-docker-416078" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p download-docker-416078
--- SKIP: TestDownloadOnlyKic (0.56s)

                                                
                                    
x
+
TestAddons/serial/GCPAuth/RealCredentials (0.01s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/RealCredentials
addons_test.go:759: This test requires a GCE instance (excluding Cloud Shell) with a container based driver
--- SKIP: TestAddons/serial/GCPAuth/RealCredentials (0.01s)

                                                
                                    
x
+
TestAddons/parallel/Olm (0s)

                                                
                                                
=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Olm
addons_test.go:483: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

                                                
                                    
x
+
TestAddons/parallel/AmdGpuDevicePlugin (0s)

                                                
                                                
=== RUN   TestAddons/parallel/AmdGpuDevicePlugin
=== PAUSE TestAddons/parallel/AmdGpuDevicePlugin

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/AmdGpuDevicePlugin
addons_test.go:1033: skip amd gpu test on all but docker driver and amd64 platform
--- SKIP: TestAddons/parallel/AmdGpuDevicePlugin (0.00s)

                                                
                                    
x
+
TestDockerEnvContainerd (0s)

                                                
                                                
=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with docker true linux arm64
docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
--- SKIP: TestDockerEnvContainerd (0.00s)

                                                
                                    
x
+
TestKVMDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestKVMDriverInstallOrUpdate
driver_install_or_update_test.go:45: Skip if arm64. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestKVMDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperKitDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:105: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

                                                
                                    
x
+
TestHyperkitDriverSkipUpgrade (0s)

                                                
                                                
=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:169: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/MySQL (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1792: arm64 is not supported by mysql. Skip the test. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestFunctional/parallel/MySQL (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/PodmanEnv (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:565: only validate podman env with docker container runtime, currently testing docker
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)

                                                
                                    
x
+
TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)

                                                
                                    
x
+
TestFunctionalNewestKubernetes (0s)

                                                
                                                
=== RUN   TestFunctionalNewestKubernetes
functional_test.go:82: 
--- SKIP: TestFunctionalNewestKubernetes (0.00s)

                                                
                                    
x
+
TestGvisorAddon (0s)

                                                
                                                
=== RUN   TestGvisorAddon
gvisor_addon_test.go:34: skipping test because --gvisor=false
--- SKIP: TestGvisorAddon (0.00s)

                                                
                                    
x
+
TestImageBuild/serial/validateImageBuildWithBuildEnv (0s)

                                                
                                                
=== RUN   TestImageBuild/serial/validateImageBuildWithBuildEnv
image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431
--- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)

                                                
                                    
x
+
TestChangeNoneUser (0s)

                                                
                                                
=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

                                                
                                    
x
+
TestScheduledStopWindows (0s)

                                                
                                                
=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

                                                
                                    
x
+
TestNetworkPlugins/group/cilium (3.94s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
E0917 01:23:52.991672  578284 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
panic.go:636: 
----------------------- debugLogs start: cilium-490703 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: netcat logs:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns deployment:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe coredns pods:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: coredns logs:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe api server pod(s):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: api server logs:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> host: /etc/cni:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: ip a s:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: ip r s:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: iptables-save:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: iptables table nat:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy daemon set:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe kube-proxy pod(s):
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> k8s: kube-proxy logs:
error: context "cilium-490703" does not exist

                                                
                                                

                                                
                                                
>>> host: kubelet daemon status:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: kubelet daemon config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> k8s: kubelet logs:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> k8s: kubectl config:
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt
    extensions:
    - extension:
        last-update: Wed, 17 Sep 2025 01:23:36 UTC
        provider: minikube.sigs.k8s.io
        version: v1.37.0
      name: cluster_info
    server: https://192.168.85.2:8443
  name: NoKubernetes-037677
contexts:
- context:
    cluster: NoKubernetes-037677
    extensions:
    - extension:
        last-update: Wed, 17 Sep 2025 01:23:36 UTC
        provider: minikube.sigs.k8s.io
        version: v1.37.0
      name: context_info
    namespace: default
    user: NoKubernetes-037677
  name: NoKubernetes-037677
current-context: ""
kind: Config
preferences: {}
users:
- name: NoKubernetes-037677
  user:
    client-certificate: /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/NoKubernetes-037677/client.crt
    client-key: /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/NoKubernetes-037677/client.key

                                                
                                                

                                                
                                                
>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-490703

                                                
                                                

                                                
                                                
>>> host: docker daemon status:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: docker daemon config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/docker/daemon.json:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: docker system info:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon status:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: cri-docker daemon config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: cri-dockerd version:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: containerd daemon status:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: containerd daemon config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/containerd/config.toml:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: containerd config dump:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: crio daemon status:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: crio daemon config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: /etc/crio:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                

                                                
                                                
>>> host: crio config:
* Profile "cilium-490703" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-490703"

                                                
                                                
----------------------- debugLogs end: cilium-490703 [took: 3.755562134s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-490703" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cilium-490703
--- SKIP: TestNetworkPlugins/group/cilium (3.94s)
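The repeated "context was not found for specified context: cilium-490703" and "Profile \"cilium-490703\" not found" lines in the debugLogs above are expected: the cilium group is skipped before any cluster is created, so the kubeconfig dumped under ">>> k8s: kubectl config:" contains no such context. A minimal client-go sketch of that check follows; the kubeconfig path and context name are illustrative assumptions, not anything the test itself runs.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Both values are assumptions for illustration; this report's jobs use a
	// per-run kubeconfig under the minikube-integration workspace instead.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	wanted := "cilium-490703"

	cfg, err := clientcmd.LoadFromFile(kubeconfig)
	if err != nil {
		fmt.Fprintln(os.Stderr, "loading kubeconfig:", err)
		os.Exit(1)
	}

	if _, ok := cfg.Contexts[wanted]; !ok {
		// This is the situation behind the errors above: kubectl is asked to use
		// a context that was never written because the profile was never started.
		fmt.Printf("context %q not found (current-context: %q)\n", wanted, cfg.CurrentContext)
		return
	}
	fmt.Printf("context %q exists\n", wanted)
}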

                                                
                                    
x
+
TestStartStop/group/disable-driver-mounts (0.21s)

                                                
                                                
=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

                                                
                                                

                                                
                                                
=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:101: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-013015" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p disable-driver-mounts-013015
--- SKIP: TestStartStop/group/disable-driver-mounts (0.21s)

                                                
                                    