=== RUN TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath
=== CONT TestAddons/parallel/LocalPath
addons_test.go:949: (dbg) Run: kubectl --context addons-235235 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:955: (dbg) Run: kubectl --context addons-235235 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:959: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:402: (dbg) Run: kubectl --context addons-235235 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: [... identical "kubectl get pvc test-pvc" status poll repeated ~300 more times until the 5m0s deadline ...]
helpers_test.go:402: (dbg) Non-zero exit: kubectl --context addons-235235 get pvc test-pvc -o jsonpath={.status.phase} -n default: context deadline exceeded (1.583µs)
helpers_test.go:404: TestAddons/parallel/LocalPath: WARNING: PVC get for "default" "test-pvc" returned: context deadline exceeded
addons_test.go:960: failed waiting for PVC test-pvc: context deadline exceeded
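Triage note: the final poll fails after only 1.583µs because the 5m0s context had already expired before kubectl even ran; the PVC itself simply never reported Bound. A minimal manual triage against the same profile could look like the sketch below. The local-path-storage namespace and the deploy/local-path-provisioner name are assumptions based on the upstream local-path-provisioner defaults (they are not confirmed by this log), and the JSONPath form of kubectl wait needs kubectl v1.23+:
    # Inspect why the claim is stuck (expect Pending plus provisioner events)
    kubectl --context addons-235235 -n default describe pvc test-pvc
    # Check the provisioner itself (names assumed from upstream local-path defaults)
    kubectl --context addons-235235 -n local-path-storage logs deploy/local-path-provisioner
    # Re-run the same wait the test performs, as a one-liner
    kubectl --context addons-235235 -n default wait pvc/test-pvc \
        --for=jsonpath='{.status.phase}'=Bound --timeout=5m
Note that local-path's StorageClass typically uses volumeBindingMode: WaitForFirstConsumer, so the claim stays Pending until the consuming pod (pod.yaml above) is actually scheduled.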
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestAddons/parallel/LocalPath]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestAddons/parallel/LocalPath]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect addons-235235
helpers_test.go:243: (dbg) docker inspect addons-235235:
-- stdout --
[
{
"Id": "d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2",
"Created": "2025-09-17T00:21:46.080276124Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 579441,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-17T00:21:46.144946221Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:3d6f74760dfc17060da5abc5d463d3d45b4ceea05955c9cc42b3ec56cb38cc48",
"ResolvConfPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/hostname",
"HostsPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/hosts",
"LogPath": "/var/lib/docker/containers/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2/d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2-json.log",
"Name": "/addons-235235",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-235235:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-235235",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4294967296,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8589934592,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "d707243db987958c2a899d9ce1405bbbbc539e428eaf168e3fdcd254e5e44bc2",
"LowerDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b-init/diff:/var/lib/docker/overlay2/6bf7b6c5df3b8adf86744064027446440589049694f02d12745ec1de281bdb92/diff",
"MergedDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/merged",
"UpperDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/diff",
"WorkDir": "/var/lib/docker/overlay2/12b87eaa1e6f960abbe0d94b1cecd14dc21d2449be15a55da06824528543005b/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "addons-235235",
"Source": "/var/lib/docker/volumes/addons-235235/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "addons-235235",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-235235",
"name.minikube.sigs.k8s.io": "addons-235235",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "a43a40c4fa98667e767bf14a111b23f82aa9a3ec39c79413d4bd461cc7ae9299",
"SandboxKey": "/var/run/docker/netns/a43a40c4fa98",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33505"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33506"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33509"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33507"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33508"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-235235": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "62:f3:eb:53:e2:f6",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "60650bbded5295efe88096468307a83afd002d873e3296f6c84175bd2508d292",
"EndpointID": "edd46cd0c3a6ca4e5cb23dc4c256bcb2950d4889ac948e44e2d64fdfed41cc13",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-235235",
"d707243db987"
]
}
}
}
}
]
-- /stdout --
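The inspect dump above is mostly engine defaults; when only the post-mortem network settings matter, a Go-template filter keeps the output to the useful part. This is plain docker inspect -f usage against the same container name; the second command assumes every exposed port has at least one host binding, as the Ports map above shows:
    # Just the attached networks (IP, gateway, MAC)
    docker inspect -f '{{json .NetworkSettings.Networks}}' addons-235235
    # Port -> host-port map (assumes one binding per port, as in the dump above)
    docker inspect -f '{{range $p, $b := .NetworkSettings.Ports}}{{$p}} -> {{(index $b 0).HostPort}}{{println}}{{end}}' addons-235235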
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-235235 -n addons-235235
helpers_test.go:252: <<< TestAddons/parallel/LocalPath FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestAddons/parallel/LocalPath]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p addons-235235 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 logs -n 25: (1.366342078s)
helpers_test.go:260: TestAddons/parallel/LocalPath logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p download-docker-416078 │ download-docker-416078 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
│ start │ --download-only -p binary-mirror-287818 --alsologtostderr --binary-mirror http://127.0.0.1:36367 --driver=docker --container-runtime=docker │ binary-mirror-287818 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ │
│ delete │ -p binary-mirror-287818 │ binary-mirror-287818 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:21 UTC │
│ addons │ enable dashboard -p addons-235235 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ │
│ addons │ disable dashboard -p addons-235235 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ │
│ start │ -p addons-235235 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:21 UTC │ 17 Sep 25 00:23 UTC │
│ addons │ addons-235235 addons disable volcano --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
│ addons │ addons-235235 addons disable gcp-auth --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
│ addons │ enable headlamp -p addons-235235 --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:24 UTC │ 17 Sep 25 00:24 UTC │
│ addons │ addons-235235 addons disable headlamp --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ ip │ addons-235235 ip │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable registry --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable metrics-server --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable inspektor-gadget --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ ssh │ addons-235235 ssh curl -s http://127.0.0.1/ -H 'Host: nginx.example.com' │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ ip │ addons-235235 ip │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable ingress-dns --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable ingress --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ configure registry-creds -f ./testdata/addons_testconfig.json -p addons-235235 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable registry-creds --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable nvidia-device-plugin --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:25 UTC │
│ addons │ addons-235235 addons disable yakd --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:25 UTC │ 17 Sep 25 00:26 UTC │
│ addons │ addons-235235 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
│ addons │ addons-235235 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
│ addons │ addons-235235 addons disable cloud-spanner --alsologtostderr -v=1 │ addons-235235 │ jenkins │ v1.37.0 │ 17 Sep 25 00:26 UTC │ 17 Sep 25 00:26 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/17 00:21:21
Running on machine: ip-172-31-21-244
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0917 00:21:21.575028 579049 out.go:360] Setting OutFile to fd 1 ...
I0917 00:21:21.575207 579049 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:21:21.575232 579049 out.go:374] Setting ErrFile to fd 2...
I0917 00:21:21.575254 579049 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0917 00:21:21.575549 579049 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-576428/.minikube/bin
I0917 00:21:21.576022 579049 out.go:368] Setting JSON to false
I0917 00:21:21.576893 579049 start.go:130] hostinfo: {"hostname":"ip-172-31-21-244","uptime":11027,"bootTime":1758057455,"procs":148,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
I0917 00:21:21.576984 579049 start.go:140] virtualization:
I0917 00:21:21.580251 579049 out.go:179] * [addons-235235] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I0917 00:21:21.583995 579049 out.go:179] - MINIKUBE_LOCATION=21550
I0917 00:21:21.584058 579049 notify.go:220] Checking for updates...
I0917 00:21:21.589730 579049 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0917 00:21:21.592567 579049 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21550-576428/kubeconfig
I0917 00:21:21.595509 579049 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-576428/.minikube
I0917 00:21:21.598356 579049 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I0917 00:21:21.601213 579049 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0917 00:21:21.604306 579049 driver.go:421] Setting default libvirt URI to qemu:///system
I0917 00:21:21.625256 579049 docker.go:123] docker version: linux-28.1.1:Docker Engine - Community
I0917 00:21:21.625390 579049 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0917 00:21:21.685346 579049 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:47 SystemTime:2025-09-17 00:21:21.676486714 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0917 00:21:21.685483 579049 docker.go:318] overlay module found
I0917 00:21:21.688541 579049 out.go:179] * Using the docker driver based on user configuration
I0917 00:21:21.691206 579049 start.go:304] selected driver: docker
I0917 00:21:21.691230 579049 start.go:918] validating driver "docker" against <nil>
I0917 00:21:21.691252 579049 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0917 00:21:21.691974 579049 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0917 00:21:21.747252 579049 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:47 SystemTime:2025-09-17 00:21:21.737982702 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0917 00:21:21.747418 579049 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0917 00:21:21.747655 579049 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0917 00:21:21.750585 579049 out.go:179] * Using Docker driver with root privileges
I0917 00:21:21.753414 579049 cni.go:84] Creating CNI manager for ""
I0917 00:21:21.753496 579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0917 00:21:21.753509 579049 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0917 00:21:21.753597 579049 start.go:348] cluster config:
{Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0917 00:21:21.756589 579049 out.go:179] * Starting "addons-235235" primary control-plane node in "addons-235235" cluster
I0917 00:21:21.759495 579049 cache.go:123] Beginning downloading kic base image for docker with docker
I0917 00:21:21.762345 579049 out.go:179] * Pulling base image v0.0.48 ...
I0917 00:21:21.765171 579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0917 00:21:21.765228 579049 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4
I0917 00:21:21.765240 579049 cache.go:58] Caching tarball of preloaded images
I0917 00:21:21.765246 579049 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0917 00:21:21.765334 579049 preload.go:172] Found /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0917 00:21:21.765346 579049 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0917 00:21:21.765742 579049 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json ...
I0917 00:21:21.765772 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json: {Name:mka6cd9dd804af0eb234f92dd0d9458ad6607892 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:21.780916 579049 cache.go:152] Downloading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 to local cache
I0917 00:21:21.781018 579049 image.go:65] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory
I0917 00:21:21.781042 579049 image.go:68] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local cache directory, skipping pull
I0917 00:21:21.781050 579049 image.go:137] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in cache, skipping pull
I0917 00:21:21.781057 579049 cache.go:155] successfully saved gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 as a tarball
I0917 00:21:21.781063 579049 cache.go:165] Loading gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 from local cache
I0917 00:21:39.237948 579049 cache.go:167] successfully loaded and using gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 from cached tarball
I0917 00:21:39.237990 579049 cache.go:232] Successfully downloaded all kic artifacts
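Worth noting here: the kic base image is pinned by sha256 digest, so the roughly 17s spent between "Loading" at 00:21:21 and "successfully loaded" at 00:21:39 is purely a local tarball load, not a registry pull. A quick way to confirm the base image landed in the daemon (repository name taken from the log above; the registry digest may display as <none> for tarball-loaded images):

    docker images gcr.io/k8s-minikube/kicbase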
I0917 00:21:39.238038 579049 start.go:360] acquireMachinesLock for addons-235235: {Name:mk420ada22966e913cee54d953c4c96eb7228735 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0917 00:21:39.238736 579049 start.go:364] duration metric: took 670.901µs to acquireMachinesLock for "addons-235235"
I0917 00:21:39.238775  579049 start.go:93] Provisioning new machine with config: &{Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0917 00:21:39.238850 579049 start.go:125] createHost starting for "" (driver="docker")
I0917 00:21:39.242186 579049 out.go:252] * Creating docker container (CPUs=2, Memory=4096MB) ...
I0917 00:21:39.242413 579049 start.go:159] libmachine.API.Create for "addons-235235" (driver="docker")
I0917 00:21:39.242449 579049 client.go:168] LocalClient.Create starting
I0917 00:21:39.242573 579049 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem
I0917 00:21:39.701609 579049 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem
I0917 00:21:40.116625 579049 cli_runner.go:164] Run: docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0917 00:21:40.132770 579049 cli_runner.go:211] docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0917 00:21:40.132854 579049 network_create.go:284] running [docker network inspect addons-235235] to gather additional debugging logs...
I0917 00:21:40.132893 579049 cli_runner.go:164] Run: docker network inspect addons-235235
W0917 00:21:40.148702 579049 cli_runner.go:211] docker network inspect addons-235235 returned with exit code 1
I0917 00:21:40.148750 579049 network_create.go:287] error running [docker network inspect addons-235235]: docker network inspect addons-235235: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-235235 not found
I0917 00:21:40.148764 579049 network_create.go:289] output of [docker network inspect addons-235235]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-235235 not found
** /stderr **
I0917 00:21:40.148907 579049 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0917 00:21:40.165466 579049 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40017f8e00}
I0917 00:21:40.165507 579049 network_create.go:124] attempt to create docker network addons-235235 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0917 00:21:40.165567 579049 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-235235 addons-235235
I0917 00:21:40.224981 579049 network_create.go:108] docker network addons-235235 192.168.49.0/24 created
I0917 00:21:40.225012 579049 kic.go:121] calculated static IP "192.168.49.2" for the "addons-235235" container
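The `docker network create` at 00:21:40 maps directly onto the subnet picked two lines earlier: a bridge network with subnet 192.168.49.0/24, gateway 192.168.49.1, and MTU 1500, labeled so minikube can find and delete it later. A sketch for verifying the result, using the same Go-template style the log itself uses (network name as in this run):

    docker network inspect addons-235235 --format '{{(index .IPAM.Config 0).Subnet}} gw {{(index .IPAM.Config 0).Gateway}}'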
I0917 00:21:40.225096 579049 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0917 00:21:40.240204 579049 cli_runner.go:164] Run: docker volume create addons-235235 --label name.minikube.sigs.k8s.io=addons-235235 --label created_by.minikube.sigs.k8s.io=true
I0917 00:21:40.257434 579049 oci.go:103] Successfully created a docker volume addons-235235
I0917 00:21:40.257520 579049 cli_runner.go:164] Run: docker run --rm --name addons-235235-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --entrypoint /usr/bin/test -v addons-235235:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0917 00:21:42.332421 579049 cli_runner.go:217] Completed: docker run --rm --name addons-235235-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --entrypoint /usr/bin/test -v addons-235235:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib: (2.074852116s)
I0917 00:21:42.332482 579049 oci.go:107] Successfully prepared a docker volume addons-235235
I0917 00:21:42.332519 579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0917 00:21:42.332540 579049 kic.go:194] Starting extracting preloaded images to volume ...
I0917 00:21:42.332620 579049 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-235235:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0917 00:21:46.003007 579049 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-576428/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-235235:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.670344697s)
I0917 00:21:46.003047 579049 kic.go:203] duration metric: took 3.670502674s to extract preloaded images to volume ...
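The two `docker run --rm` invocations above are the standard pattern for populating a named volume without keeping a container around. The first run (entrypoint `/usr/bin/test -d /var/lib`) mounts the empty named volume at /var, which makes Docker copy the image's existing /var content into it on first mount; that is why the log calls the volume "prepared". The second run then streams the lz4 preload through tar into the volume. Reduced to its shape, with illustrative names, and assuming the image ships tar with lz4 support as kicbase does:

    docker volume create demo-data
    docker run --rm --entrypoint /usr/bin/tar \
      -v "$PWD/preload.tar.lz4":/preloaded.tar:ro -v demo-data:/out \
      gcr.io/k8s-minikube/kicbase:v0.0.48 -I lz4 -xf /preloaded.tar -C /out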
W0917 00:21:46.003215 579049 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0917 00:21:46.003342 579049 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0917 00:21:46.065517 579049 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-235235 --name addons-235235 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-235235 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-235235 --network addons-235235 --ip 192.168.49.2 --volume addons-235235:/var --security-opt apparmor=unconfined --memory=4096mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0917 00:21:46.363848 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Running}}
I0917 00:21:46.384702 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:21:46.403801 579049 cli_runner.go:164] Run: docker exec addons-235235 stat /var/lib/dpkg/alternatives/iptables
I0917 00:21:46.455139 579049 oci.go:144] the created container "addons-235235" has a running status.
I0917 00:21:46.455168 579049 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa...
I0917 00:21:46.510166 579049 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0917 00:21:46.530338 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:21:46.549851 579049 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0917 00:21:46.549871 579049 kic_runner.go:114] Args: [docker exec --privileged addons-235235 chown docker:docker /home/docker/.ssh/authorized_keys]
I0917 00:21:46.600724 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:21:46.626397 579049 machine.go:93] provisionDockerMachine start ...
I0917 00:21:46.626500 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:46.647098 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:46.647544 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:46.647565 579049 main.go:141] libmachine: About to run SSH command:
hostname
I0917 00:21:46.648327 579049 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I0917 00:21:49.787981 579049 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-235235
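The "ssh: handshake failed: EOF" at 00:21:46 is expected on a freshly started container: sshd inside it is not yet accepting connections, and libmachine keeps retrying until the `hostname` probe succeeds about three seconds later. A rough shell analogue of that wait loop, using the forwarded port from this run (key path illustrative):

    until ssh -o StrictHostKeyChecking=no -i ./id_rsa -p 33505 docker@127.0.0.1 hostname 2>/dev/null; do sleep 1; done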
I0917 00:21:49.788006 579049 ubuntu.go:182] provisioning hostname "addons-235235"
I0917 00:21:49.788066 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:49.806736 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:49.807057 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:49.807073 579049 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-235235 && echo "addons-235235" | sudo tee /etc/hostname
I0917 00:21:49.959594 579049 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-235235
I0917 00:21:49.959672 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:49.976980 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:49.977314 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:49.977346 579049 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-235235' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-235235/g' /etc/hosts;
else
echo '127.0.1.1 addons-235235' | sudo tee -a /etc/hosts;
fi
fi
I0917 00:21:50.117147 579049 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0917 00:21:50.117174 579049 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-576428/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-576428/.minikube}
I0917 00:21:50.117193 579049 ubuntu.go:190] setting up certificates
I0917 00:21:50.117204 579049 provision.go:84] configureAuth start
I0917 00:21:50.117265 579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
I0917 00:21:50.135563 579049 provision.go:143] copyHostCerts
I0917 00:21:50.135658 579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/ca.pem (1082 bytes)
I0917 00:21:50.135794 579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/cert.pem (1123 bytes)
I0917 00:21:50.135873 579049 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-576428/.minikube/key.pem (1675 bytes)
I0917 00:21:50.135938 579049 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem org=jenkins.addons-235235 san=[127.0.0.1 192.168.49.2 addons-235235 localhost minikube]
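The server certificate is issued for the SAN set [127.0.0.1 192.168.49.2 addons-235235 localhost minikube], so the TLS-secured Docker endpoint validates whichever of those names or addresses a client dials. To double-check the SANs on the written file:

    openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'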
I0917 00:21:50.541509 579049 provision.go:177] copyRemoteCerts
I0917 00:21:50.541577 579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0917 00:21:50.541623 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:50.558273 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:21:50.657010 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0917 00:21:50.681604 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0917 00:21:50.705284 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0917 00:21:50.728479 579049 provision.go:87] duration metric: took 611.21857ms to configureAuth
I0917 00:21:50.728507 579049 ubuntu.go:206] setting minikube options for container-runtime
I0917 00:21:50.728697 579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:21:50.728759 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:50.745617 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:50.745937 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:50.745953 579049 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0917 00:21:50.884904 579049 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0917 00:21:50.884930 579049 ubuntu.go:71] root file system type: overlay
I0917 00:21:50.885055 579049 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0917 00:21:50.885124 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:50.901703 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:50.902000 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:50.902092 579049 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0917 00:21:51.051889 579049 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0917 00:21:51.051978 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:51.070361 579049 main.go:141] libmachine: Using SSH client type: native
I0917 00:21:51.070688 579049 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3ef1e0] 0x3f19a0 <nil> [] 0s} 127.0.0.1 33505 <nil> <nil>}
I0917 00:21:51.070712 579049 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0917 00:21:51.887936 579049 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:57:01.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-17 00:21:51.044692854 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
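The long SSH command at 00:21:51 is an idempotent-install idiom: `diff -u old new` exits 0 when the rendered unit already matches the installed one, short-circuiting the `||` so nothing is touched; only when the files differ (as here, hence the unified diff in the output) does the branch move the new unit into place and daemon-reload/enable/restart docker. Stripped to its shape with an illustrative unit name:

    sudo diff -u /lib/systemd/system/app.service /lib/systemd/system/app.service.new \
      || { sudo mv /lib/systemd/system/app.service.new /lib/systemd/system/app.service; \
           sudo systemctl daemon-reload && sudo systemctl restart app; }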
I0917 00:21:51.887961 579049 machine.go:96] duration metric: took 5.261543896s to provisionDockerMachine
I0917 00:21:51.887974 579049 client.go:171] duration metric: took 12.645517134s to LocalClient.Create
I0917 00:21:51.887992 579049 start.go:167] duration metric: took 12.645579343s to libmachine.API.Create "addons-235235"
I0917 00:21:51.888003 579049 start.go:293] postStartSetup for "addons-235235" (driver="docker")
I0917 00:21:51.888013 579049 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0917 00:21:51.888093 579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0917 00:21:51.888152 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:51.905418 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:21:52.018304 579049 ssh_runner.go:195] Run: cat /etc/os-release
I0917 00:21:52.021860 579049 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0917 00:21:52.021893 579049 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0917 00:21:52.021904 579049 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0917 00:21:52.021911 579049 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0917 00:21:52.021922 579049 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/addons for local assets ...
I0917 00:21:52.021997 579049 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-576428/.minikube/files for local assets ...
I0917 00:21:52.022025 579049 start.go:296] duration metric: took 134.016299ms for postStartSetup
I0917 00:21:52.022352 579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
I0917 00:21:52.041209 579049 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/config.json ...
I0917 00:21:52.041537 579049 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0917 00:21:52.041600 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:52.062226 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:21:52.165262 579049 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0917 00:21:52.169996 579049 start.go:128] duration metric: took 12.931126842s to createHost
I0917 00:21:52.170022 579049 start.go:83] releasing machines lock for "addons-235235", held for 12.931267713s
I0917 00:21:52.170096 579049 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-235235
I0917 00:21:52.186894 579049 ssh_runner.go:195] Run: cat /version.json
I0917 00:21:52.186955 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:52.187205 579049 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0917 00:21:52.187269 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:21:52.204716 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:21:52.223706 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:21:52.300108 579049 ssh_runner.go:195] Run: systemctl --version
I0917 00:21:52.429222 579049 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0917 00:21:52.433763 579049 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0917 00:21:52.458377 579049 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0917 00:21:52.458464 579049 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0917 00:21:52.489630 579049 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0917 00:21:52.489700 579049 start.go:495] detecting cgroup driver to use...
I0917 00:21:52.489742 579049 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0917 00:21:52.489851 579049 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0917 00:21:52.505791 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0917 00:21:52.517581 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0917 00:21:52.528064 579049 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0917 00:21:52.528150 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0917 00:21:52.539055 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0917 00:21:52.548794 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0917 00:21:52.558539 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0917 00:21:52.569361 579049 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0917 00:21:52.578347 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0917 00:21:52.587980 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0917 00:21:52.597499 579049 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0917 00:21:52.607117 579049 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0917 00:21:52.615625 579049 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
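Writing 1 to /proc/sys/net/ipv4/ip_forward is required for pod networking (kube-proxy and the bridge CNI forward traffic between interfaces), but it only lasts until the next boot; minikube simply reapplies it on every start. On a persistent host the durable equivalent would be:

    echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ip-forward.conf
    sudo sysctl --system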
I0917 00:21:52.624283 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:21:52.715592 579049 ssh_runner.go:195] Run: sudo systemctl restart containerd
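The sed series at 00:21:52 rewrites /etc/containerd/config.toml so containerd agrees with the "cgroupfs" driver detected on the host: SystemdCgroup is forced to false, the legacy io.containerd.runtime.v1.linux and runc.v1 runtime names are migrated to io.containerd.runc.v2, and the CNI conf_dir is pinned to /etc/cni/net.d. After patching, the decisive fragment of the file looks roughly like:

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = false

A cgroup-driver mismatch between the runtime and the kubelet (configured as cgroupDriver: cgroupfs in the kubeadm config further down) is a classic cause of pods failing to start.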
I0917 00:21:52.798437 579049 start.go:495] detecting cgroup driver to use...
I0917 00:21:52.798486 579049 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0917 00:21:52.798536 579049 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0917 00:21:52.811932 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0917 00:21:52.824748 579049 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0917 00:21:52.841549 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0917 00:21:52.853296 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0917 00:21:52.864655 579049 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0917 00:21:52.880551 579049 ssh_runner.go:195] Run: which cri-dockerd
I0917 00:21:52.883971 579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0917 00:21:52.892513 579049 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0917 00:21:52.910305 579049 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0917 00:21:53.006270 579049 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0917 00:21:53.106321 579049 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I0917 00:21:53.106467 579049 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0917 00:21:53.124526 579049 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0917 00:21:53.135633 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:21:53.222370 579049 ssh_runner.go:195] Run: sudo systemctl restart docker
I0917 00:21:53.592077 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0917 00:21:53.603412 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0917 00:21:53.615682 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0917 00:21:53.627819 579049 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0917 00:21:53.716473 579049 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0917 00:21:53.805388 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:21:53.891592 579049 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0917 00:21:53.904811 579049 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0917 00:21:53.915815 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:21:54.000668 579049 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0917 00:21:54.067673 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0917 00:21:54.081897 579049 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0917 00:21:54.082049 579049 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0917 00:21:54.085819 579049 start.go:563] Will wait 60s for crictl version
I0917 00:21:54.085910 579049 ssh_runner.go:195] Run: which crictl
I0917 00:21:54.089251 579049 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0917 00:21:54.126515 579049 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0917 00:21:54.126630 579049 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0917 00:21:54.147693 579049 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0917 00:21:54.181014 579049 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0917 00:21:54.181127 579049 cli_runner.go:164] Run: docker network inspect addons-235235 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0917 00:21:54.196147 579049 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0917 00:21:54.199689 579049 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
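The /etc/hosts edit at 00:21:54 is a replace-or-append in one pass: `grep -v` filters out any stale host.minikube.internal line, the echo appends the current mapping, and the result is copied back with sudo (a bare `>` redirect onto /etc/hosts would fail without root). The same pattern with illustrative values:

    { grep -v $'\tmy.alias$' /etc/hosts; printf '10.0.0.5\tmy.alias\n'; } > /tmp/h.$$
    sudo cp /tmp/h.$$ /etc/hosts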
I0917 00:21:54.210089  579049 kubeadm.go:875] updating cluster {Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0917 00:21:54.210217 579049 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0917 00:21:54.210286 579049 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0917 00:21:54.227698 579049 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0917 00:21:54.227719 579049 docker.go:621] Images already preloaded, skipping extraction
I0917 00:21:54.227842 579049 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0917 00:21:54.246045 579049 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0917 00:21:54.246070 579049 cache_images.go:85] Images are preloaded, skipping loading
I0917 00:21:54.246084 579049 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 docker true true} ...
I0917 00:21:54.246180 579049 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-235235 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0917 00:21:54.246249 579049 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0917 00:21:54.293114 579049 cni.go:84] Creating CNI manager for ""
I0917 00:21:54.293139 579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0917 00:21:54.293150 579049 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0917 00:21:54.293173  579049 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-235235 NodeName:addons-235235 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0917 00:21:54.293297 579049 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-235235"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
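This rendered manifest is the config that the bootstrap step hands to kubeadm (it is copied to /var/tmp/minikube/kubeadm.yaml.new a few lines below). Outside minikube, the manual equivalent would be approximately:

    sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml

(minikube's actual invocation adds further flags, such as preflight-error suppression, so treat this as a sketch.)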
I0917 00:21:54.293364 579049 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0917 00:21:54.302150 579049 binaries.go:44] Found k8s binaries, skipping transfer
I0917 00:21:54.302218 579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0917 00:21:54.310488 579049 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0917 00:21:54.327652 579049 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0917 00:21:54.345355 579049 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2214 bytes)
I0917 00:21:54.362817 579049 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0917 00:21:54.366255 579049 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0917 00:21:54.376487 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:21:54.462688 579049 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0917 00:21:54.478534 579049 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235 for IP: 192.168.49.2
I0917 00:21:54.478555 579049 certs.go:194] generating shared ca certs ...
I0917 00:21:54.478571 579049 certs.go:226] acquiring lock for ca certs: {Name:mk04b183dabeee5957951eb115c646a018da171d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:54.479358 579049 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key
I0917 00:21:54.996977 579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt ...
I0917 00:21:54.997009 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt: {Name:mkf0a697fcbe2a0d7404f79998ce7d05a56f9b21 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:54.997771 579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key ...
I0917 00:21:54.997789 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key: {Name:mk1affc9bfbafcb9724a2477ed588b90f352fe2b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:54.997896 579049 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key
I0917 00:21:55.508397 579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt ...
I0917 00:21:55.508430 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt: {Name:mk63f938da2b7f092b546377ccdb97820e1deb65 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:55.509217 579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key ...
I0917 00:21:55.509235 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key: {Name:mk0b72927909072f74409784a63f5cf1a0f52efa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:55.510579 579049 certs.go:256] generating profile certs ...
I0917 00:21:55.510647 579049 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key
I0917 00:21:55.510660 579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt with IP's: []
I0917 00:21:56.217917 579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt ...
I0917 00:21:56.217949 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.crt: {Name:mk4b75e1b38631d642201a7e78d079bdbcb807a0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:56.218141 579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key ...
I0917 00:21:56.218154 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/client.key: {Name:mk43515ac5339f5f12428b15b1543e198224bd20 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:56.218240 579049 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096
I0917 00:21:56.218261 579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0917 00:21:57.306815 579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 ...
I0917 00:21:57.306850 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096: {Name:mk2d071d9f4b6dc2f36419dd2e49c9cd929adc28 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:57.307044 579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096 ...
I0917 00:21:57.307058 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096: {Name:mk0977e0aa9abadab25a197623ceabb9d71ac336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:57.308130 579049 certs.go:381] copying /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt.07c8a096 -> /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt
I0917 00:21:57.308231 579049 certs.go:385] copying /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key.07c8a096 -> /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key
I0917 00:21:57.308292 579049 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key
I0917 00:21:57.308321 579049 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt with IP's: []
I0917 00:21:57.456147 579049 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt ...
I0917 00:21:57.456177 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt: {Name:mk2ab853f095cd406bcc75fee4a7d90cf2717a27 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:21:57.456925 579049 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key ...
I0917 00:21:57.456943 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key: {Name:mk0068b799899ac8e9a1b70775fefb46581bed03 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
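The certs.go/crypto.go lines above follow a standard x509 pattern: create self-signed CAs (minikubeCA, proxyClientCA), then sign leaf certificates with them, embedding the IPs recorded in the log as SANs on the apiserver certificate. A condensed, self-contained sketch with crypto/x509 (key sizes and validity periods are assumptions, not values from the log):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"net"
	"time"
)

func main() {
	// Self-signed CA, analogous to the "minikubeCA" generated above.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf apiserver cert signed by the CA, with the IP SANs from the log.
	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(3, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.49.2"),
		},
	}
	_, _ = x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
}
```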
I0917 00:21:57.457587 579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca-key.pem (1671 bytes)
I0917 00:21:57.457629 579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/ca.pem (1082 bytes)
I0917 00:21:57.457664 579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/cert.pem (1123 bytes)
I0917 00:21:57.457699 579049 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-576428/.minikube/certs/key.pem (1675 bytes)
I0917 00:21:57.458310 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0917 00:21:57.482173 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0917 00:21:57.509090 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0917 00:21:57.535147 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0917 00:21:57.564117 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0917 00:21:57.587464 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0917 00:21:57.611051 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0917 00:21:57.634617 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/profiles/addons-235235/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0917 00:21:57.657866 579049 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-576428/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0917 00:21:57.681005 579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0917 00:21:57.699236 579049 ssh_runner.go:195] Run: openssl version
I0917 00:21:57.704793 579049 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0917 00:21:57.714086 579049 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0917 00:21:57.717662 579049 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 17 00:21 /usr/share/ca-certificates/minikubeCA.pem
I0917 00:21:57.717720 579049 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0917 00:21:57.724265 579049 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
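These steps install the CA into the node's trust store: the PEM is linked under /etc/ssl/certs both by name and by its OpenSSL subject hash (b5213941.0 in this run), which is how OpenSSL locates CA certificates at verification time. A sketch of that step (the helper is illustrative; only the commands themselves come from the log):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// linkCA computes the OpenSSL subject hash of a CA PEM and idempotently
// creates the /etc/ssl/certs/<hash>.0 symlink, mirroring the two logged
// commands.
func linkCA(pem string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941" for this CA
	link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
	cmd := fmt.Sprintf("test -L %s || ln -fs %s %s", link, pem, link)
	return exec.Command("sudo", "/bin/bash", "-c", cmd).Run()
}

func main() {
	if err := linkCA("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Println("link failed:", err)
	}
}
```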
I0917 00:21:57.733683 579049 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0917 00:21:57.736972 579049 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0917 00:21:57.737019 579049 kubeadm.go:392] StartCluster: {Name:addons-235235 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:addons-235235 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0917 00:21:57.737137 579049 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0917 00:21:57.753540 579049 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0917 00:21:57.762649 579049 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0917 00:21:57.771322 579049 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0917 00:21:57.771406 579049 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0917 00:21:57.779897 579049 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0917 00:21:57.779915 579049 kubeadm.go:157] found existing configuration files:
I0917 00:21:57.779963 579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0917 00:21:57.788488 579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0917 00:21:57.788580 579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0917 00:21:57.797066 579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0917 00:21:57.805092 579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0917 00:21:57.805181 579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0917 00:21:57.813476 579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0917 00:21:57.822730 579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0917 00:21:57.822825 579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0917 00:21:57.831201 579049 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0917 00:21:57.839955 579049 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0917 00:21:57.840048 579049 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
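The four grep-and-rm pairs above are one stale-config sweep: any kubeconfig under /etc/kubernetes that does not point at the expected apiserver endpoint is deleted so that kubeadm init regenerates it. A compact sketch of that loop (run is a stand-in for minikube's SSH runner; here it simply executes locally):

```go
package main

import "os/exec"

// run stands in for the ssh_runner seen in the log; it just execs locally.
func run(name string, args ...string) error {
	return exec.Command(name, args...).Run()
}

func main() {
	endpoint := "https://control-plane.minikube.internal:8443"
	for _, conf := range []string{"admin.conf", "kubelet.conf", "controller-manager.conf", "scheduler.conf"} {
		path := "/etc/kubernetes/" + conf
		// grep exits non-zero when the endpoint (or the whole file) is
		// missing, which the log treats as "stale: remove and let kubeadm
		// rewrite it".
		if err := run("sudo", "grep", endpoint, path); err != nil {
			_ = run("sudo", "rm", "-f", path)
		}
	}
}
```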
I0917 00:21:57.848234 579049 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0917 00:21:57.889439 579049 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0917 00:21:57.889533 579049 kubeadm.go:310] [preflight] Running pre-flight checks
I0917 00:21:57.909841 579049 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0917 00:21:57.909941 579049 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1084-aws
I0917 00:21:57.909991 579049 kubeadm.go:310] OS: Linux
I0917 00:21:57.910097 579049 kubeadm.go:310] CGROUPS_CPU: enabled
I0917 00:21:57.910180 579049 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0917 00:21:57.910262 579049 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0917 00:21:57.910368 579049 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0917 00:21:57.910448 579049 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0917 00:21:57.910522 579049 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0917 00:21:57.910620 579049 kubeadm.go:310] CGROUPS_PIDS: enabled
I0917 00:21:57.910695 579049 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0917 00:21:57.910769 579049 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0917 00:21:57.974704 579049 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0917 00:21:57.974855 579049 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0917 00:21:57.974967 579049 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0917 00:21:57.992912 579049 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0917 00:21:57.999386 579049 out.go:252] - Generating certificates and keys ...
I0917 00:21:57.999562 579049 kubeadm.go:310] [certs] Using existing ca certificate authority
I0917 00:21:57.999687 579049 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0917 00:21:58.805184 579049 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0917 00:21:58.947726 579049 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0917 00:21:59.277680 579049 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0917 00:21:59.723727 579049 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0917 00:21:59.795659 579049 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0917 00:21:59.796771 579049 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-235235 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0917 00:22:00.174798 579049 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0917 00:22:00.179633 579049 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-235235 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0917 00:22:00.645369 579049 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0917 00:22:00.971446 579049 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0917 00:22:01.466751 579049 kubeadm.go:310] [certs] Generating "sa" key and public key
I0917 00:22:01.467061 579049 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0917 00:22:01.991011 579049 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0917 00:22:02.830639 579049 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0917 00:22:03.820572 579049 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0917 00:22:04.480770 579049 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0917 00:22:04.729327 579049 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0917 00:22:04.730011 579049 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0917 00:22:04.732630 579049 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0917 00:22:04.736203 579049 out.go:252] - Booting up control plane ...
I0917 00:22:04.736314 579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0917 00:22:04.736404 579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0917 00:22:04.736498 579049 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0917 00:22:04.746717 579049 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0917 00:22:04.746839 579049 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0917 00:22:04.752934 579049 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0917 00:22:04.753312 579049 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0917 00:22:04.753371 579049 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0917 00:22:04.860970 579049 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0917 00:22:04.861096 579049 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0917 00:22:06.360579 579049 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.501263247s
I0917 00:22:06.364324 579049 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0917 00:22:06.364453 579049 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0917 00:22:06.364562 579049 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0917 00:22:06.364651 579049 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0917 00:22:09.897887 579049 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 3.532628125s
I0917 00:22:11.771445 579049 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 5.407083046s
I0917 00:22:12.865779 579049 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 6.501347269s
I0917 00:22:12.885266 579049 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0917 00:22:12.898862 579049 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0917 00:22:12.913392 579049 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0917 00:22:12.913662 579049 kubeadm.go:310] [mark-control-plane] Marking the node addons-235235 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0917 00:22:12.925485 579049 kubeadm.go:310] [bootstrap-token] Using token: cvmje2.b2v7plsvdopt3p6d
I0917 00:22:12.928401 579049 out.go:252] - Configuring RBAC rules ...
I0917 00:22:12.928561 579049 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0917 00:22:12.932858 579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0917 00:22:12.940381 579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0917 00:22:12.946541 579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0917 00:22:12.950757 579049 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0917 00:22:12.954681 579049 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0917 00:22:13.273886 579049 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0917 00:22:13.709285 579049 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0917 00:22:14.273326 579049 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0917 00:22:14.274605 579049 kubeadm.go:310]
I0917 00:22:14.274689 579049 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0917 00:22:14.274695 579049 kubeadm.go:310]
I0917 00:22:14.274776 579049 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0917 00:22:14.274781 579049 kubeadm.go:310]
I0917 00:22:14.274807 579049 kubeadm.go:310] mkdir -p $HOME/.kube
I0917 00:22:14.274869 579049 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0917 00:22:14.274922 579049 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0917 00:22:14.274926 579049 kubeadm.go:310]
I0917 00:22:14.274983 579049 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0917 00:22:14.274987 579049 kubeadm.go:310]
I0917 00:22:14.275038 579049 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0917 00:22:14.275042 579049 kubeadm.go:310]
I0917 00:22:14.275096 579049 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0917 00:22:14.275188 579049 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0917 00:22:14.275260 579049 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0917 00:22:14.275264 579049 kubeadm.go:310]
I0917 00:22:14.275352 579049 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0917 00:22:14.275432 579049 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0917 00:22:14.275436 579049 kubeadm.go:310]
I0917 00:22:14.275524 579049 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token cvmje2.b2v7plsvdopt3p6d \
I0917 00:22:14.275631 579049 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:35b41cd3859f8de52bd979a1594263a0a6bc8247f58714753744f2ce2587da45 \
I0917 00:22:14.275652 579049 kubeadm.go:310] --control-plane
I0917 00:22:14.275656 579049 kubeadm.go:310]
I0917 00:22:14.275745 579049 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0917 00:22:14.275749 579049 kubeadm.go:310]
I0917 00:22:14.275835 579049 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token cvmje2.b2v7plsvdopt3p6d \
I0917 00:22:14.275941 579049 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:35b41cd3859f8de52bd979a1594263a0a6bc8247f58714753744f2ce2587da45
I0917 00:22:14.279625 579049 kubeadm.go:310] [WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
I0917 00:22:14.279883 579049 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0917 00:22:14.280015 579049 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0917 00:22:14.280030 579049 cni.go:84] Creating CNI manager for ""
I0917 00:22:14.280044 579049 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0917 00:22:14.283191 579049 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I0917 00:22:14.285942 579049 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0917 00:22:14.294820 579049 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
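The 496 bytes copied into /etc/cni/net.d/1-k8s.conflist are the bridge CNI chain just announced. Roughly the shape such a conflist takes, written from Go (the subnet and plugin flags here are illustrative assumptions, not the exact file minikube generates):

```go
package main

import "os"

// Illustrative bridge CNI config in the spirit of 1-k8s.conflist: a bridge
// plugin with host-local IPAM, chained with portmap for hostPort support.
const conflist = `{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
    },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}`

func main() {
	_ = os.WriteFile("/etc/cni/net.d/1-k8s.conflist", []byte(conflist), 0644)
}
```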
I0917 00:22:14.313038 579049 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0917 00:22:14.313179 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:14.313266 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-235235 minikube.k8s.io/updated_at=2025_09_17T00_22_14_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=addons-235235 minikube.k8s.io/primary=true
I0917 00:22:14.437821 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:14.437912 579049 ops.go:34] apiserver oom_adj: -16
I0917 00:22:14.937948 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:15.438802 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:15.938472 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:16.438274 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:16.937920 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:17.438806 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:17.938258 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:18.438033 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:18.938219 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:19.437918 579049 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0917 00:22:19.527165 579049 kubeadm.go:1105] duration metric: took 5.214030381s to wait for elevateKubeSystemPrivileges
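The burst of `kubectl get sa default` runs at 500ms intervals is the elevateKubeSystemPrivileges wait: the cluster-admin binding created above only takes effect once the default ServiceAccount exists, so minikube polls until the get succeeds (about 5.2s in this run). A sketch of that polling loop using the same kubectl invocation the log shows (the timeout value is an assumption):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Poll every 500ms, as the log timestamps suggest, until the default
	// ServiceAccount is visible or the deadline passes.
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		err := exec.Command("sudo", "/var/lib/minikube/binaries/v1.34.0/kubectl",
			"get", "sa", "default", "--kubeconfig=/var/lib/minikube/kubeconfig").Run()
		if err == nil {
			fmt.Println("default service account is ready")
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for default service account")
}
```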
I0917 00:22:19.527199 579049 kubeadm.go:394] duration metric: took 21.790182686s to StartCluster
I0917 00:22:19.527217 579049 settings.go:142] acquiring lock: {Name:mkeeff7458e530a541c151580b54d47f2e77f0de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:22:19.527863 579049 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21550-576428/kubeconfig
I0917 00:22:19.528270 579049 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-576428/kubeconfig: {Name:mk3b9e4b05730cfa71613487e1675bc90b668ce8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0917 00:22:19.528891 579049 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0917 00:22:19.528923 579049 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0917 00:22:19.529159 579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:22:19.529188 579049 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:true auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:true storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0917 00:22:19.529258 579049 addons.go:69] Setting yakd=true in profile "addons-235235"
I0917 00:22:19.529272 579049 addons.go:238] Setting addon yakd=true in "addons-235235"
I0917 00:22:19.529292 579049 addons.go:69] Setting inspektor-gadget=true in profile "addons-235235"
I0917 00:22:19.529309 579049 addons.go:238] Setting addon inspektor-gadget=true in "addons-235235"
I0917 00:22:19.529314 579049 addons.go:69] Setting metrics-server=true in profile "addons-235235"
I0917 00:22:19.529329 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.529335 579049 addons.go:238] Setting addon metrics-server=true in "addons-235235"
I0917 00:22:19.529351 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.529820 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.529926 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.530167 579049 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-235235"
I0917 00:22:19.530193 579049 addons.go:238] Setting addon nvidia-device-plugin=true in "addons-235235"
I0917 00:22:19.530214 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.530608 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.533005 579049 addons.go:69] Setting registry=true in profile "addons-235235"
I0917 00:22:19.533064 579049 addons.go:238] Setting addon registry=true in "addons-235235"
I0917 00:22:19.533105 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.533598 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.533721 579049 addons.go:69] Setting registry-creds=true in profile "addons-235235"
I0917 00:22:19.553513 579049 addons.go:238] Setting addon registry-creds=true in "addons-235235"
I0917 00:22:19.553554 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.554001 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.533731 579049 addons.go:69] Setting storage-provisioner=true in profile "addons-235235"
I0917 00:22:19.554558 579049 addons.go:238] Setting addon storage-provisioner=true in "addons-235235"
I0917 00:22:19.554586 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.555019 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.533736 579049 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-235235"
I0917 00:22:19.572135 579049 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-235235"
I0917 00:22:19.533739 579049 addons.go:69] Setting volcano=true in profile "addons-235235"
I0917 00:22:19.576505 579049 addons.go:238] Setting addon volcano=true in "addons-235235"
I0917 00:22:19.533742 579049 addons.go:69] Setting volumesnapshots=true in profile "addons-235235"
I0917 00:22:19.533785 579049 out.go:179] * Verifying Kubernetes components...
I0917 00:22:19.533978 579049 addons.go:69] Setting default-storageclass=true in profile "addons-235235"
I0917 00:22:19.533987 579049 addons.go:69] Setting amd-gpu-device-plugin=true in profile "addons-235235"
I0917 00:22:19.534002 579049 addons.go:69] Setting cloud-spanner=true in profile "addons-235235"
I0917 00:22:19.534006 579049 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-235235"
I0917 00:22:19.534010 579049 addons.go:69] Setting ingress=true in profile "addons-235235"
I0917 00:22:19.534017 579049 addons.go:69] Setting gcp-auth=true in profile "addons-235235"
I0917 00:22:19.534022 579049 addons.go:69] Setting ingress-dns=true in profile "addons-235235"
I0917 00:22:19.529295 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.577395 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.577415 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.577420 579049 addons.go:238] Setting addon volumesnapshots=true in "addons-235235"
I0917 00:22:19.578621 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.589790 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.591337 579049 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-235235"
I0917 00:22:19.591681 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.605010 579049 addons.go:238] Setting addon amd-gpu-device-plugin=true in "addons-235235"
I0917 00:22:19.605685 579049 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0917 00:22:19.605936 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.606433 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.610273 579049 mustload.go:65] Loading cluster: addons-235235
I0917 00:22:19.610605 579049 config.go:182] Loaded profile config "addons-235235": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0917 00:22:19.610965 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.622466 579049 addons.go:238] Setting addon ingress-dns=true in "addons-235235"
I0917 00:22:19.622523 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.627116 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.663876 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.678927 579049 addons.go:238] Setting addon cloud-spanner=true in "addons-235235"
I0917 00:22:19.678989 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.679545 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.693752 579049 addons.go:238] Setting addon csi-hostpath-driver=true in "addons-235235"
I0917 00:22:19.693802 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.694361 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.717165 579049 out.go:179] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.17.3
I0917 00:22:19.717821 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.723675 579049 addons.go:435] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0917 00:22:19.723700 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0917 00:22:19.723768 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.748756 579049 addons.go:238] Setting addon ingress=true in "addons-235235"
I0917 00:22:19.748839 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.749572 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.756519 579049 out.go:179] - Using image registry.k8s.io/metrics-server/metrics-server:v0.8.0
I0917 00:22:19.763197 579049 addons.go:435] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0917 00:22:19.763223 579049 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0917 00:22:19.763306 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.812721 579049 out.go:179] - Using image docker.io/upmcenterprises/registry-creds:1.10
I0917 00:22:19.815731 579049 addons.go:435] installing /etc/kubernetes/addons/registry-creds-rc.yaml
I0917 00:22:19.815756 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-creds-rc.yaml (3306 bytes)
I0917 00:22:19.815835 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.838323 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.845511 579049 addons.go:238] Setting addon default-storageclass=true in "addons-235235"
I0917 00:22:19.845553 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:19.846041 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:19.860247 579049 out.go:179] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0917 00:22:19.866691 579049 out.go:179] - Using image docker.io/rocm/k8s-device-plugin:1.25.2.8
I0917 00:22:19.868578 579049 out.go:179] - Using image docker.io/kicbase/minikube-ingress-dns:0.0.4
I0917 00:22:19.868786 579049 out.go:179] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.44.1
I0917 00:22:19.873647 579049 out.go:179] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.41
I0917 00:22:19.868799 579049 out.go:179] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.9
I0917 00:22:19.869804 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0917 00:22:19.869810 579049 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0917 00:22:19.898606 579049 addons.go:435] installing /etc/kubernetes/addons/deployment.yaml
I0917 00:22:19.898675 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0917 00:22:19.898773 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.898982 579049 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0917 00:22:19.899016 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0917 00:22:19.899071 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.919903 579049 addons.go:435] installing /etc/kubernetes/addons/ig-crd.yaml
I0917 00:22:19.919926 579049 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (14 bytes)
I0917 00:22:19.919997 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.922402 579049 out.go:179] - Using image docker.io/marcnuri/yakd:0.0.5
I0917 00:22:19.925824 579049 out.go:179] - Using image docker.io/registry:3.0.0
I0917 00:22:19.928851 579049 addons.go:435] installing /etc/kubernetes/addons/registry-rc.yaml
I0917 00:22:19.928874 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0917 00:22:19.928945 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.929156 579049 addons.go:435] installing /etc/kubernetes/addons/yakd-ns.yaml
I0917 00:22:19.929181 579049 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0917 00:22:19.929248 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.944797 579049 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0917 00:22:19.944885 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.945181 579049 addons.go:435] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0917 00:22:19.945220 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2889 bytes)
I0917 00:22:19.945288 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.969993 579049 addons.go:435] installing /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I0917 00:22:19.970015 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/amd-gpu-device-plugin.yaml (1868 bytes)
I0917 00:22:19.970084 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:19.987595 579049 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
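The sed pipeline above rewrites the CoreDNS Corefile in place: it inserts a hosts{} block resolving host.minikube.internal to the host gateway (192.168.49.1 here) ahead of the forward plugin, adds a log directive before errors, and replaces the ConfigMap. A sketch of the same insertion done in Go on a trimmed example Corefile:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Trimmed example Corefile; the live one is fetched from the coredns
	// ConfigMap, patched, and pushed back with `kubectl replace`.
	corefile := `.:53 {
    errors
    forward . /etc/resolv.conf
}`
	// Block the logged sed inserts so host.minikube.internal resolves
	// locally before queries are forwarded upstream.
	hostsBlock := `    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
`
	var out strings.Builder
	for _, line := range strings.Split(corefile, "\n") {
		if strings.HasPrefix(strings.TrimSpace(line), "forward . /etc/resolv.conf") {
			out.WriteString(hostsBlock)
		}
		out.WriteString(line)
		out.WriteByte('\n')
	}
	fmt.Print(out.String())
}
```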
I0917 00:22:19.998391 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.010306 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.037808 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.046051 579049 out.go:179] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
I0917 00:22:20.049476 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0917 00:22:20.056378 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0917 00:22:20.058379 579049 addons.go:238] Setting addon storage-provisioner-rancher=true in "addons-235235"
I0917 00:22:20.058469 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:20.058947 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:20.067368 579049 out.go:179] - Using image docker.io/volcanosh/vc-scheduler:v1.12.2
I0917 00:22:20.067553 579049 out.go:179] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
I0917 00:22:20.068597 579049 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0917 00:22:20.068613 579049 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0917 00:22:20.068675 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:20.077879 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0917 00:22:20.078875 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.084720 579049 out.go:179] - Using image registry.k8s.io/ingress-nginx/controller:v1.13.2
I0917 00:22:20.087502 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0917 00:22:20.093179 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0917 00:22:20.093467 579049 addons.go:435] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0917 00:22:20.093503 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0917 00:22:20.093618 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:20.093889 579049 out.go:179] - Using image docker.io/volcanosh/vc-webhook-manager:v1.12.2
I0917 00:22:20.094721 579049 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0917 00:22:20.102090 579049 out.go:179] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0917 00:22:20.102260 579049 out.go:179] - Using image docker.io/volcanosh/vc-controller-manager:v1.12.2
I0917 00:22:20.105802 579049 out.go:179] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0917 00:22:20.110505 579049 addons.go:435] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0917 00:22:20.110593 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (498149 bytes)
I0917 00:22:20.110706 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:20.111106 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.117453 579049 out.go:179] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0917 00:22:20.120309 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0917 00:22:20.120335 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0917 00:22:20.120410 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:20.202345 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.216867 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.218283 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.230169 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.231429 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.244728 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.265918 579049 out.go:179] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0917 00:22:20.269669 579049 out.go:179] - Using image docker.io/busybox:stable
I0917 00:22:20.273694 579049 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0917 00:22:20.273715 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0917 00:22:20.274082 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:20.274501 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.283477 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
W0917 00:22:20.285066 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.285100 579049 retry.go:31] will retry after 252.540607ms: ssh: handshake failed: EOF
W0917 00:22:20.288539 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.288569 579049 retry.go:31] will retry after 264.62158ms: ssh: handshake failed: EOF
I0917 00:22:20.310748 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.320974 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:20.325337 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
W0917 00:22:20.330706 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.330738 579049 retry.go:31] will retry after 286.431927ms: ssh: handshake failed: EOF
W0917 00:22:20.331009 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.331022 579049 retry.go:31] will retry after 225.439884ms: ssh: handshake failed: EOF
W0917 00:22:20.560617 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.560694 579049 retry.go:31] will retry after 498.859934ms: ssh: handshake failed: EOF
W0917 00:22:20.618933 579049 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0917 00:22:20.619001 579049 retry.go:31] will retry after 250.417921ms: ssh: handshake failed: EOF
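The W/I pairs above show transient SSH handshake failures being absorbed by a short randomized backoff (retry.go) rather than failing the addon setup. The shape of that retry loop, with the dial stubbed out (attempt count and delay range are assumptions):

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// dial stands in for the SSH handshake that intermittently returns EOF
// while the node's sshd is still coming up.
func dial() error { return errors.New("ssh: handshake failed: EOF") }

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		if err := dial(); err == nil {
			return
		}
		// Randomized delay in roughly the 200-500ms band seen in the log.
		wait := time.Duration(200+rand.Intn(300)) * time.Millisecond
		fmt.Printf("will retry after %v\n", wait)
		time.Sleep(wait)
	}
}
```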
I0917 00:22:20.999362 579049 addons.go:435] installing /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:20.999427 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-deployment.yaml (15034 bytes)
I0917 00:22:21.120689 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-creds-rc.yaml
I0917 00:22:21.131890 579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0917 00:22:21.131913 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0917 00:22:21.215923 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0917 00:22:21.256240 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0917 00:22:21.278795 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0917 00:22:21.309143 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:21.310139 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0917 00:22:21.320908 579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0917 00:22:21.320973 579049 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0917 00:22:21.350747 579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0917 00:22:21.350814 579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0917 00:22:21.363215 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0917 00:22:21.366593 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I0917 00:22:21.376668 579049 addons.go:435] installing /etc/kubernetes/addons/registry-svc.yaml
I0917 00:22:21.376740 579049 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0917 00:22:21.379853 579049 addons.go:435] installing /etc/kubernetes/addons/yakd-sa.yaml
I0917 00:22:21.379924 579049 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0917 00:22:21.454575 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0917 00:22:21.454649 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0917 00:22:21.481397 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0917 00:22:21.508249 579049 addons.go:435] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0917 00:22:21.508324 579049 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0917 00:22:21.575611 579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0917 00:22:21.575687 579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0917 00:22:21.588569 579049 addons.go:435] installing /etc/kubernetes/addons/yakd-crb.yaml
I0917 00:22:21.588590 579049 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0917 00:22:21.601465 579049 addons.go:435] installing /etc/kubernetes/addons/registry-proxy.yaml
I0917 00:22:21.601485 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0917 00:22:21.639347 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0917 00:22:21.842186 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0917 00:22:21.852122 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0917 00:22:21.855602 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0917 00:22:21.855679 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0917 00:22:21.870985 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0917 00:22:21.955013 579049 addons.go:435] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0917 00:22:21.955038 579049 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0917 00:22:21.956595 579049 addons.go:435] installing /etc/kubernetes/addons/yakd-svc.yaml
I0917 00:22:21.956611 579049 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0917 00:22:22.004673 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0917 00:22:22.004702 579049 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0917 00:22:22.015168 579049 addons.go:435] installing /etc/kubernetes/addons/yakd-dp.yaml
I0917 00:22:22.015243 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0917 00:22:22.054170 579049 addons.go:435] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0917 00:22:22.054234 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0917 00:22:22.150580 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0917 00:22:22.163336 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0917 00:22:22.163415 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0917 00:22:22.247512 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0917 00:22:22.580252 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0917 00:22:22.580326 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0917 00:22:22.867348 579049 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.772593683s)
I0917 00:22:22.867983 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-creds-rc.yaml: (1.747263334s)
I0917 00:22:22.868231 579049 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.880521592s)
I0917 00:22:22.868267 579049 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
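Note: the one-liner above rewrites the coredns ConfigMap by piping `kubectl get -o yaml` through sed and back into `kubectl replace`, inserting a `hosts` stanza for 192.168.49.1 host.minikube.internal ahead of the `forward` directive. A minimal client-go sketch of the same edit (an illustration under standard client-go assumptions; minikube itself shells out to kubectl+sed exactly as logged):

package main

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// hostsBlock mirrors the stanza the sed expression above injects.
const hostsBlock = `    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }
`

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	cm, err := cs.CoreV1().ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// Insert the hosts block immediately before the forward directive.
	cm.Data["Corefile"] = strings.Replace(cm.Data["Corefile"], "    forward .", hostsBlock+"    forward .", 1)
	if _, err := cs.CoreV1().ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
}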
I0917 00:22:22.868866 579049 node_ready.go:35] waiting up to 6m0s for node "addons-235235" to be "Ready" ...
I0917 00:22:22.869306 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (1.653355535s)
I0917 00:22:22.873217 579049 node_ready.go:49] node "addons-235235" is "Ready"
I0917 00:22:22.873266 579049 node_ready.go:38] duration metric: took 4.346882ms for node "addons-235235" to be "Ready" ...
I0917 00:22:22.873304 579049 api_server.go:52] waiting for apiserver process to appear ...
I0917 00:22:22.873388 579049 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0917 00:22:23.325708 579049 addons.go:435] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0917 00:22:23.325782 579049 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0917 00:22:23.372792 579049 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-235235" context rescaled to 1 replicas
I0917 00:22:23.671947 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0917 00:22:23.672021 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0917 00:22:23.831484 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.575195536s)
I0917 00:22:24.232998 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0917 00:22:24.233020 579049 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0917 00:22:24.555276 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0917 00:22:24.555301 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0917 00:22:25.321586 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0917 00:22:25.321609 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0917 00:22:25.652309 579049 addons.go:435] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0917 00:22:25.652335 579049 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0917 00:22:25.894792 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0917 00:22:27.067585 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.788759397s)
I0917 00:22:27.067698 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.758533525s)
W0917 00:22:27.067719 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget created
serviceaccount/gadget created
configmap/gadget created
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role created
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding created
role.rbac.authorization.k8s.io/gadget-role created
rolebinding.rbac.authorization.k8s.io/gadget-role-binding created
daemonset.apps/gadget created
stderr:
Warning: spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/gadget]: deprecated since v1.30; use the "appArmorProfile" field instead
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:27.067740 579049 retry.go:31] will retry after 187.564418ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget created
serviceaccount/gadget created
configmap/gadget created
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role created
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding created
role.rbac.authorization.k8s.io/gadget-role created
rolebinding.rbac.authorization.k8s.io/gadget-role-binding created
daemonset.apps/gadget created
stderr:
Warning: spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/gadget]: deprecated since v1.30; use the "appArmorProfile" field instead
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
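Note: kubectl's client-side validation rejects this apply because at least one YAML document inside ig-crd.yaml lacks its `apiVersion` and `kind` headers; the objects from ig-deployment.yaml were created fine (see the stdout above), so only the CRD file is at fault. A quick way to flag the offending document, sketched in Go under the assumption of gopkg.in/yaml.v3 and a naive `---` split:

package main

import (
	"fmt"
	"os"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	raw, err := os.ReadFile("/etc/kubernetes/addons/ig-crd.yaml")
	if err != nil {
		panic(err)
	}
	// Naive split on document separators; adequate for spotting bad docs.
	for i, doc := range strings.Split(string(raw), "\n---") {
		var m map[string]interface{}
		if err := yaml.Unmarshal([]byte(doc), &m); err != nil || len(m) == 0 {
			continue // skip empty or unparseable documents
		}
		if m["apiVersion"] == nil || m["kind"] == nil {
			fmt.Printf("document %d is missing apiVersion and/or kind\n", i)
		}
	}
}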
I0917 00:22:27.067780 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (5.757625871s)
I0917 00:22:27.067808 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (5.704571183s)
I0917 00:22:27.068035 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml: (5.701381369s)
I0917 00:22:27.255577 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:27.282212 579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0917 00:22:27.282292 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:27.307633 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:28.198316 579049 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0917 00:22:28.732874 579049 addons.go:238] Setting addon gcp-auth=true in "addons-235235"
I0917 00:22:28.732978 579049 host.go:66] Checking if "addons-235235" exists ...
I0917 00:22:28.733534 579049 cli_runner.go:164] Run: docker container inspect addons-235235 --format={{.State.Status}}
I0917 00:22:28.760672 579049 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0917 00:22:28.760726 579049 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-235235
I0917 00:22:28.786938 579049 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33505 SSHKeyPath:/home/jenkins/minikube-integration/21550-576428/.minikube/machines/addons-235235/id_rsa Username:docker}
I0917 00:22:29.463041 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (7.981552717s)
I0917 00:22:29.463076 579049 addons.go:479] Verifying addon ingress=true in "addons-235235"
I0917 00:22:29.475984 579049 out.go:179] * Verifying ingress addon...
I0917 00:22:29.479749 579049 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0917 00:22:29.483072 579049 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0917 00:22:29.483090 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
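Note: kapi.go polls the pod list for the given label selector until every matching pod is Running; the repeated "current state: Pending" lines below are iterations of that loop. A rough client-go sketch of the pattern (assuming wait.PollUntilContextTimeout from k8s.io/apimachinery, not minikube's exact code):

package kapi

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPods blocks until all pods matching selector in ns are Running.
func waitForPods(ctx context.Context, cs *kubernetes.Clientset, ns, selector string) error {
	return wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 6*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
			if err != nil || len(pods.Items) == 0 {
				return false, nil // transient error or no pods yet: keep polling
			}
			for _, p := range pods.Items {
				if p.Status.Phase != corev1.PodRunning {
					return false, nil // at least one pod still Pending
				}
			}
			return true, nil
		})
}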
I0917 00:22:29.983564 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:30.520900 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:30.986245 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:31.513540 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:31.657033 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (10.017646567s)
I0917 00:22:31.657111 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.814852375s)
I0917 00:22:31.657121 579049 addons.go:479] Verifying addon metrics-server=true in "addons-235235"
I0917 00:22:31.657163 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (9.804981697s)
I0917 00:22:31.657324 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (9.786267653s)
I0917 00:22:31.657335 579049 addons.go:479] Verifying addon registry=true in "addons-235235"
I0917 00:22:31.657603 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.50694528s)
W0917 00:22:31.657627 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
Warning: unrecognized format "int64"
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0917 00:22:31.657644 579049 retry.go:31] will retry after 208.434362ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
Warning: unrecognized format "int64"
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
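Note: this failure is a CRD-establishment race, not a bad manifest. The VolumeSnapshotClass object is applied in the same kubectl invocation that creates the volumesnapshot CRDs, so the client's REST mapper has not yet discovered the new API ("no matches for kind ... ensure CRDs are installed first"); the retry below succeeds once discovery catches up. One way to sidestep the race, sketched with the apiextensions clientset (an assumption for illustration; minikube simply retries):

package crdwait

import (
	"context"
	"time"

	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitEstablished blocks until the named CRD reports Established=True.
func waitEstablished(ctx context.Context, cs *apiextclient.Clientset, name string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			crd, err := cs.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, nil // not visible yet: keep polling
			}
			for _, c := range crd.Status.Conditions {
				if c.Type == apiextv1.Established && c.Status == apiextv1.ConditionTrue {
					return true, nil
				}
			}
			return false, nil
		})
}

For example, calling waitEstablished with "volumesnapshotclasses.snapshot.storage.k8s.io" before applying csi-hostpath-snapshotclass.yaml would remove the need to retry.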
I0917 00:22:31.657687 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.410102979s)
I0917 00:22:31.657913 579049 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (8.784482182s)
I0917 00:22:31.657946 579049 api_server.go:72] duration metric: took 12.12899934s to wait for apiserver process to appear ...
I0917 00:22:31.657961 579049 api_server.go:88] waiting for apiserver healthz status ...
I0917 00:22:31.657976 579049 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0917 00:22:31.660605 579049 out.go:179] * Verifying registry addon...
I0917 00:22:31.662797 579049 out.go:179] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-235235 service yakd-dashboard -n yakd-dashboard
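Note: once the yakd-dashboard pod is Ready, the command above opens the service in a browser; adding the `--url` flag makes minikube print the service URL instead, which is handier on a headless CI host:

minikube -p addons-235235 service yakd-dashboard -n yakd-dashboard --url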
I0917 00:22:31.665554 579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0917 00:22:31.705531 579049 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0917 00:22:31.713969 579049 api_server.go:141] control plane version: v1.34.0
I0917 00:22:31.714001 579049 api_server.go:131] duration metric: took 56.033586ms to wait for apiserver health ...
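Note: the healthz wait above is a plain HTTPS GET against the apiserver that succeeds once it returns 200 with body "ok". A minimal Go equivalent, assuming anonymous access to /healthz (permitted in a default cluster) and skipping certificate verification for brevity:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://192.168.49.2:8443/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d: %s\n", resp.StatusCode, body) // expect: 200: ok
}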
I0917 00:22:31.714010 579049 system_pods.go:43] waiting for kube-system pods to appear ...
I0917 00:22:31.734358 579049 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0917 00:22:31.734427 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:31.735262 579049 system_pods.go:59] 16 kube-system pods found
I0917 00:22:31.735335 579049 system_pods.go:61] "coredns-66bc5c9577-6jkl2" [b422966b-a3ef-457a-9695-5acb333105f2] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0917 00:22:31.735358 579049 system_pods.go:61] "coredns-66bc5c9577-s7lnm" [c37a81dc-9db1-4641-ad42-980e711a8985] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0917 00:22:31.735398 579049 system_pods.go:61] "etcd-addons-235235" [438d620c-e5f7-4568-8e78-168d015441a0] Running
I0917 00:22:31.735420 579049 system_pods.go:61] "kube-apiserver-addons-235235" [04bff5c3-c307-41ec-84bd-84774ce5ac2b] Running
I0917 00:22:31.735436 579049 system_pods.go:61] "kube-controller-manager-addons-235235" [11b56f68-ebfa-458b-9ab6-bc0a77081554] Running
I0917 00:22:31.735456 579049 system_pods.go:61] "kube-ingress-dns-minikube" [3513e960-b391-45fd-bb89-61ffc65dd2d8] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0917 00:22:31.735485 579049 system_pods.go:61] "kube-proxy-7ccvd" [afbd7d53-723a-44f0-a2bc-90e8e2f43355] Running
I0917 00:22:31.735510 579049 system_pods.go:61] "kube-scheduler-addons-235235" [8fba6417-b80a-410d-ac0d-bdea71e0ad08] Running
I0917 00:22:31.735528 579049 system_pods.go:61] "metrics-server-85b7d694d7-wxzn6" [07657805-0866-4a88-9dc0-bde04eb55366] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0917 00:22:31.735567 579049 system_pods.go:61] "nvidia-device-plugin-daemonset-wwzd9" [62b9d30e-619a-4e29-816c-46c303ee603b] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0917 00:22:31.735614 579049 system_pods.go:61] "registry-66898fdd98-wd8hb" [8ab107e4-0115-4366-9bd4-27d43e0f5fde] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0917 00:22:31.735652 579049 system_pods.go:61] "registry-creds-764b6fb674-bpfj4" [fceed494-b40b-4705-bc7a-4fcd4cae83e7] Pending / Ready:ContainersNotReady (containers with unready status: [registry-creds]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-creds])
I0917 00:22:31.735677 579049 system_pods.go:61] "registry-proxy-w6q4j" [bd100a37-3e81-4ba5-9d5c-654e6cabeefe] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0917 00:22:31.735704 579049 system_pods.go:61] "snapshot-controller-7d9fbc56b8-7dk8p" [40f72e03-be95-4f94-96f1-7fb9bfd7782e] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0917 00:22:31.735737 579049 system_pods.go:61] "snapshot-controller-7d9fbc56b8-dr9xq" [5754c01a-b776-41d8-a534-a2efd263a92d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0917 00:22:31.735759 579049 system_pods.go:61] "storage-provisioner" [0da33418-3586-4572-95eb-054917cd0df1] Running
I0917 00:22:31.735778 579049 system_pods.go:74] duration metric: took 21.762446ms to wait for pod list to return data ...
I0917 00:22:31.735811 579049 default_sa.go:34] waiting for default service account to be created ...
I0917 00:22:31.797765 579049 default_sa.go:45] found service account: "default"
I0917 00:22:31.797840 579049 default_sa.go:55] duration metric: took 62.007252ms for default service account to be created ...
I0917 00:22:31.797865 579049 system_pods.go:116] waiting for k8s-apps to be running ...
I0917 00:22:31.866304 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0917 00:22:31.870565 579049 system_pods.go:86] 16 kube-system pods found
I0917 00:22:31.870658 579049 system_pods.go:89] "coredns-66bc5c9577-6jkl2" [b422966b-a3ef-457a-9695-5acb333105f2] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0917 00:22:31.870682 579049 system_pods.go:89] "coredns-66bc5c9577-s7lnm" [c37a81dc-9db1-4641-ad42-980e711a8985] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0917 00:22:31.870721 579049 system_pods.go:89] "etcd-addons-235235" [438d620c-e5f7-4568-8e78-168d015441a0] Running
I0917 00:22:31.870751 579049 system_pods.go:89] "kube-apiserver-addons-235235" [04bff5c3-c307-41ec-84bd-84774ce5ac2b] Running
I0917 00:22:31.870775 579049 system_pods.go:89] "kube-controller-manager-addons-235235" [11b56f68-ebfa-458b-9ab6-bc0a77081554] Running
I0917 00:22:31.870809 579049 system_pods.go:89] "kube-ingress-dns-minikube" [3513e960-b391-45fd-bb89-61ffc65dd2d8] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0917 00:22:31.870833 579049 system_pods.go:89] "kube-proxy-7ccvd" [afbd7d53-723a-44f0-a2bc-90e8e2f43355] Running
I0917 00:22:31.870855 579049 system_pods.go:89] "kube-scheduler-addons-235235" [8fba6417-b80a-410d-ac0d-bdea71e0ad08] Running
I0917 00:22:31.870892 579049 system_pods.go:89] "metrics-server-85b7d694d7-wxzn6" [07657805-0866-4a88-9dc0-bde04eb55366] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0917 00:22:31.870923 579049 system_pods.go:89] "nvidia-device-plugin-daemonset-wwzd9" [62b9d30e-619a-4e29-816c-46c303ee603b] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0917 00:22:31.870961 579049 system_pods.go:89] "registry-66898fdd98-wd8hb" [8ab107e4-0115-4366-9bd4-27d43e0f5fde] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0917 00:22:31.870986 579049 system_pods.go:89] "registry-creds-764b6fb674-bpfj4" [fceed494-b40b-4705-bc7a-4fcd4cae83e7] Pending / Ready:ContainersNotReady (containers with unready status: [registry-creds]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-creds])
I0917 00:22:31.871011 579049 system_pods.go:89] "registry-proxy-w6q4j" [bd100a37-3e81-4ba5-9d5c-654e6cabeefe] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0917 00:22:31.871050 579049 system_pods.go:89] "snapshot-controller-7d9fbc56b8-7dk8p" [40f72e03-be95-4f94-96f1-7fb9bfd7782e] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0917 00:22:31.871079 579049 system_pods.go:89] "snapshot-controller-7d9fbc56b8-dr9xq" [5754c01a-b776-41d8-a534-a2efd263a92d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0917 00:22:31.871098 579049 system_pods.go:89] "storage-provisioner" [0da33418-3586-4572-95eb-054917cd0df1] Running
I0917 00:22:31.871133 579049 system_pods.go:126] duration metric: took 73.249376ms to wait for k8s-apps to be running ...
I0917 00:22:31.871157 579049 system_svc.go:44] waiting for kubelet service to be running ....
I0917 00:22:31.871237 579049 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0917 00:22:32.006531 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:32.182920 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:32.494952 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:32.500319 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (6.605437979s)
I0917 00:22:32.500359 579049 addons.go:479] Verifying addon csi-hostpath-driver=true in "addons-235235"
I0917 00:22:32.503793 579049 out.go:179] * Verifying csi-hostpath-driver addon...
I0917 00:22:32.508235 579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0917 00:22:32.512657 579049 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0917 00:22:32.512730 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:32.669884 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:32.983997 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:33.012337 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:33.044778 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.78915837s)
W0917 00:22:33.044815 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:33.044834 579049 retry.go:31] will retry after 198.566688ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:33.044883 579049 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (4.284194298s)
I0917 00:22:33.048034 579049 out.go:179] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2
I0917 00:22:33.050893 579049 out.go:179] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.3
I0917 00:22:33.054753 579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0917 00:22:33.054792 579049 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0917 00:22:33.169519 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:33.172724 579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0917 00:22:33.172798 579049 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0917 00:22:33.243834 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:33.301590 579049 addons.go:435] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0917 00:22:33.301660 579049 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0917 00:22:33.406768 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0917 00:22:33.485607 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:33.586879 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:33.688148 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:33.984133 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:34.012679 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:34.168943 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:34.482387 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:34.511735 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:34.668984 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:34.710856 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.844466616s)
I0917 00:22:34.710903 579049 ssh_runner.go:235] Completed: sudo systemctl is-active --quiet service kubelet: (2.839642651s)
I0917 00:22:34.710915 579049 system_svc.go:56] duration metric: took 2.839756774s WaitForService to wait for kubelet
I0917 00:22:34.710925 579049 kubeadm.go:578] duration metric: took 15.181976358s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0917 00:22:34.710947 579049 node_conditions.go:102] verifying NodePressure condition ...
I0917 00:22:34.714122 579049 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0917 00:22:34.714155 579049 node_conditions.go:123] node cpu capacity is 2
I0917 00:22:34.714169 579049 node_conditions.go:105] duration metric: took 3.215734ms to run NodePressure ...
I0917 00:22:34.714181 579049 start.go:241] waiting for startup goroutines ...
I0917 00:22:34.983917 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:35.013235 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:35.169149 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:35.419972 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (2.013163249s)
I0917 00:22:35.420606 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (2.17666923s)
W0917 00:22:35.420684 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:35.420715 579049 retry.go:31] will retry after 478.039335ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:35.422881 579049 addons.go:479] Verifying addon gcp-auth=true in "addons-235235"
I0917 00:22:35.426486 579049 out.go:179] * Verifying gcp-auth addon...
I0917 00:22:35.430334 579049 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0917 00:22:35.433311 579049 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0917 00:22:35.433335 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:35.483522 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:35.512985 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:35.669283 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:35.899660 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:35.934061 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:35.983075 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:36.014126 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:36.169228 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:36.433391 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:36.484048 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:36.513719 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:36.669883 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:36.934019 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:36.967996 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.068279172s)
W0917 00:22:36.968081 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:36.968114 579049 retry.go:31] will retry after 429.933718ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:36.983492 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:37.012578 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:37.169608 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:37.398868 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:37.434035 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:37.483695 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:37.513097 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:37.669254 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:37.933828 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:37.983753 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:38.012359 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:38.169280 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:38.433659 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:38.472091 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.073117553s)
W0917 00:22:38.472179 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:38.472211 579049 retry.go:31] will retry after 1.093261919s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:38.483107 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:38.513748 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:38.669564 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:38.933810 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:39.035792 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:39.036182 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:39.169358 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:39.433484 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:39.483655 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:39.512275 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:39.566570 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:39.671658 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:39.933810 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:39.982626 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:40.012312 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:40.168520 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:40.433728 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:40.482815 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:40.511323 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:40.663119 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.096514525s)
W0917 00:22:40.663150 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:40.663168 579049 retry.go:31] will retry after 968.834ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:40.669504 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:40.934323 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:40.983478 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:41.012058 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:41.169418 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:41.433562 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:41.483160 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:41.511746 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:41.632349 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:41.668836 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:41.939173 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:41.983603 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:42.013137 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:42.178902 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:42.434743 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:42.483015 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:42.512312 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:42.672319 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:42.698504 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.066119827s)
W0917 00:22:42.698536 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:42.698554 579049 retry.go:31] will retry after 3.87974874s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
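
The validation failure repeated above is self-describing: kubectl rejects any manifest whose top-level document omits the required apiVersion and kind fields, so every re-apply of /etc/kubernetes/addons/ig-crd.yaml fails the same way, while the companion ig-deployment.yaml resources (namespace, serviceaccount, configmap, RBAC objects, daemonset) keep applying cleanly. As a minimal sketch, assuming a hypothetical CRD (all names below are placeholders, not the actual contents of ig-crd.yaml), a well-formed head of such a file must begin with both fields:

    apiVersion: apiextensions.k8s.io/v1   # required on every Kubernetes object
    kind: CustomResourceDefinition        # required on every Kubernetes object
    metadata:
      name: examples.example.dev          # hypothetical CRD name
    spec:
      group: example.dev
      names:
        kind: Example
        plural: examples
        singular: example
      scope: Namespaced
      versions:
        - name: v1
          served: true
          storage: true
          schema:
            openAPIV3Schema:
              type: object

Following the error message's suggestion of --validate=false would only suppress the client-side check; a document without apiVersion and kind would likely still fail server-side, since the API server cannot route an object whose kind is unknown, so fixing the file itself is the real remedy.
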
I0917 00:22:42.935006 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:42.983177 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:43.011513 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:43.168634 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:43.434743 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:43.483672 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:43.512419 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:43.669559 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:43.934133 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:43.985194 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:44.013102 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:44.169064 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:44.434452 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:44.484735 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:44.516687 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:44.669016 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:44.934907 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:45.036222 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:45.036251 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:45.183204 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:45.435398 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:45.483797 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:45.512339 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:45.676517 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:45.933912 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:46.035397 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:46.036327 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:46.169499 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:46.433966 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:46.482966 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:46.511660 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:46.578871 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:46.669739 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:46.934755 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:46.983637 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:47.014965 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:47.170179 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:47.434208 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:47.484303 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:47.512218 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:47.669451 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:47.722398 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.143486123s)
W0917 00:22:47.722481 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:47.722516 579049 retry.go:31] will retry after 3.865203544s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:47.934104 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:47.983251 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:48.012414 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:48.228151 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:48.441992 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:48.491006 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:48.517229 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:48.670336 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:48.935197 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:49.035902 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:49.036115 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:49.169418 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:49.434110 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:49.483835 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:49.512599 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:49.669628 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:49.934176 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:49.983522 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:50.013156 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:50.169713 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:50.434906 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:50.483748 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:50.512896 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:50.669471 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:50.934381 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:50.983906 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:51.016707 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:51.169495 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:51.433642 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:51.484021 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:51.512789 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:51.588323 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:51.668870 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:51.934839 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:51.983106 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:52.013018 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:52.169566 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:52.445730 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:52.483931 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:52.511423 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:52.669221 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:52.937597 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:53.000759 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:53.016261 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:53.109924 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.521553496s)
W0917 00:22:53.109964 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:53.110032 579049 retry.go:31] will retry after 5.363493142s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:53.169589 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:53.436641 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:53.484203 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:53.512129 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:53.669404 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:53.933687 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:53.984009 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:54.012523 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:54.168759 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:54.436940 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:54.482957 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:54.512511 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:54.669715 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:54.933652 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:54.983709 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:55.016170 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:55.169198 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:55.434414 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:55.483608 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:55.511338 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:55.669199 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:55.934082 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:55.982975 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:56.012228 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:56.169443 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:56.433537 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:56.483574 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:56.511919 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:56.669526 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:56.934029 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:56.983078 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:57.012894 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:57.168345 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:57.433231 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:57.483322 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:57.511707 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:57.668839 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:57.933467 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:57.983865 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:58.012707 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:58.169780 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:58.433750 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:58.474033 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:22:58.483484 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:58.512004 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:58.668825 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:58.934274 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:58.987068 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:59.035579 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:59.168985 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:59.433843 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:59.483657 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:22:59.511417 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:22:59.658322 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.184206111s)
W0917 00:22:59.658400 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:59.658437 579049 retry.go:31] will retry after 9.746511661s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:22:59.669263 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:22:59.934238 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:22:59.982864 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:00.016052 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:00.174691 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:00.434984 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:00.483876 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:00.513476 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:00.669584 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:00.933971 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:00.982750 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:01.011860 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:01.169096 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:01.436942 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:01.537211 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:01.538183 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:01.669014 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:01.935900 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:01.983355 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:02.012728 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:02.169264 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:02.438468 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:02.486595 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:02.518387 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:02.669406 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:02.976760 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:02.982537 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:03.012025 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:03.168995 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:03.434586 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:03.483947 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:03.513255 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:03.671520 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:03.934836 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:03.983476 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:04.012791 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:04.169795 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:04.434192 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:04.483011 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:04.512545 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:04.669008 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:04.934442 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:05.035350 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:05.035523 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:05.168304 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:05.433164 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:05.483321 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:05.511434 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:05.668126 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:05.933337 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:05.983583 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:06.012788 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:06.168724 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:06.434027 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:06.483588 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:06.512253 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:06.668834 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:06.934114 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:06.983375 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:07.011687 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:07.170210 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:07.434042 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:07.484735 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:07.512586 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:07.669109 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:07.949118 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:07.991221 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:08.017216 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:08.169471 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:08.433198 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:08.483313 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:08.511644 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:08.668653 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:08.933720 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:08.984291 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:09.012628 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:09.168820 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:09.405193 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:23:09.434292 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:09.483808 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:09.512517 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:09.668467 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:09.933869 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:09.983452 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:10.013181 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:10.169685 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:10.433630 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:10.484107 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:10.512966 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:10.551154 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.145912711s)
W0917 00:23:10.551189 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:23:10.551207 579049 retry.go:31] will retry after 10.609466096s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:23:10.669918 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0917 00:23:10.935398 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:11.035890 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:11.035927 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:11.170517 579049 kapi.go:107] duration metric: took 39.504954611s to wait for kubernetes.io/minikube-addons=registry ...
I0917 00:23:11.435194 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:11.483475 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:11.512376 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:11.934148 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:11.983014 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:12.013691 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:12.433817 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:12.482974 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:12.512574 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:12.934094 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:12.987820 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:13.086240 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:13.434623 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:13.483502 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:13.511875 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:13.934639 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:14.035725 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:14.035890 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:14.441837 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:14.482977 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:14.511985 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:14.933846 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:14.982978 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:15.035820 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:15.434072 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:15.483545 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:15.528537 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:15.936337 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:15.984195 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:16.013827 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:16.433875 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:16.483351 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:16.511950 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:16.933619 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:16.983912 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:17.013130 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:17.433715 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:17.483887 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:17.512579 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:17.936202 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:17.983438 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:18.012291 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:18.438017 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:18.537157 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:18.537329 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:18.939228 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:18.983501 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:19.012876 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:19.434128 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:19.483738 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:19.511993 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:19.940784 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:19.984848 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:20.016983 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:20.443095 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:20.483572 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:20.512361 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:20.933072 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:20.982973 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:21.012269 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:21.161633 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I0917 00:23:21.434508 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:21.484749 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:21.518514 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:21.933502 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:21.983271 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:22.011521 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:22.438977 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:22.483224 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:22.520929 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:22.527795 579049 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (1.36612796s)
W0917 00:23:22.527836 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
I0917 00:23:22.527856 579049 retry.go:31] will retry after 27.856550582s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
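
The back-off is also visible across the retry.go:31 lines: the waits grow roughly geometrically with jitter (3.88s, 3.87s, 5.36s, 9.75s, 10.61s, 27.86s). A minimal, self-contained Go sketch of that retry-with-jittered-exponential-backoff shape (an illustration only, not minikube's actual implementation; all names are hypothetical):

    package main

    import (
    	"errors"
    	"fmt"
    	"math/rand"
    	"time"
    )

    // retryWithBackoff retries fn until it succeeds or attempts run out,
    // sleeping an exponentially growing, jittered interval between tries --
    // the same shape as the "will retry after ..." delays in the log above.
    func retryWithBackoff(fn func() error, attempts int, base time.Duration) error {
    	delay := base
    	var err error
    	for i := 0; i < attempts; i++ {
    		if err = fn(); err == nil {
    			return nil
    		}
    		// Jitter: scale the current delay by a random factor in [0.5, 1.5).
    		jittered := time.Duration(float64(delay) * (0.5 + rand.Float64()))
    		fmt.Printf("will retry after %v: %v\n", jittered, err)
    		time.Sleep(jittered)
    		delay *= 2 // exponential growth of the base delay
    	}
    	return err
    }

    func main() {
    	calls := 0
    	err := retryWithBackoff(func() error {
    		calls++
    		if calls < 3 {
    			return errors.New("apply failed") // placeholder failure
    		}
    		return nil
    	}, 5, 2*time.Second)
    	fmt.Println("result:", err)
    }

Here a base delay doubles on each attempt and the random factor spreads the waits, which reproduces the uneven-but-growing sequence of delays logged above.
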
I0917 00:23:22.934334 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:23.013809 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:23.014105 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:23.433945 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:23.483038 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:23.512502 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:23.933589 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:23.983501 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:24.013504 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:24.441542 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:24.484414 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:24.512098 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:24.934001 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:24.983644 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:25.020238 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:25.433777 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:25.483554 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:25.511476 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:25.935514 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:26.036451 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:26.036871 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:26.434657 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:26.484766 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:26.512035 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:26.933479 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:26.983466 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:27.012054 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:27.433378 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:27.483258 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:27.511607 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:27.936267 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:27.983608 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:28.016562 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:28.434426 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:28.483992 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:28.512861 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:28.961694 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:29.055767 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:29.056248 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:29.433985 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:29.483331 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:29.511649 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:29.956412 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:29.983770 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:30.015076 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:30.433022 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:30.483280 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:30.511812 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:30.933487 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:30.984911 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:31.015167 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:31.433730 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:31.517425 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:31.518948 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:31.933295 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:31.983012 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:32.012898 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:32.433836 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:32.483221 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:32.512177 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:32.938217 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:33.014504 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:33.014790 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:33.434075 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:33.483110 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:33.512209 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:33.934251 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:33.984054 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:34.012293 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0917 00:23:34.436238 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:34.483272 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:34.512989 579049 kapi.go:107] duration metric: took 1m2.004754049s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0917 00:23:34.935107 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:35.035360 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:35.440491 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:35.483684 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:35.934278 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:35.983480 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:36.433866 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:36.483489 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:36.934081 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:36.983363 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:37.434402 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:37.483348 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:37.933582 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:37.983191 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:38.433926 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:38.483203 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:38.934315 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:38.984100 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:39.433869 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:39.483591 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:39.938352 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:40.039485 579049 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0917 00:23:40.434201 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:40.483155 579049 kapi.go:107] duration metric: took 1m11.003407728s to wait for app.kubernetes.io/name=ingress-nginx ...
I0917 00:23:40.933502 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:41.434827 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:41.933247 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:42.434075 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:42.934274 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:43.435033 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:43.934501 579049 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0917 00:23:44.436090 579049 kapi.go:107] duration metric: took 1m9.005751766s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0917 00:23:44.439490 579049 out.go:179] * Your GCP credentials will now be mounted into every pod created in the addons-235235 cluster.
I0917 00:23:44.443208 579049 out.go:179] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0917 00:23:44.446137 579049 out.go:179] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
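
The `gcp-auth-skip-secret` opt-out mentioned above is a label carried by the pod itself, which the addon checks before injecting credentials. A minimal sketch of a pod the addon would leave alone (pod name and image are hypothetical; only the label key matters):

apiVersion: v1
kind: Pod
metadata:
  name: no-gcp-creds                 # hypothetical name, for illustration only
  labels:
    gcp-auth-skip-secret: "true"     # presence of this key skips the credential mount
spec:
  containers:
  - name: app
    image: busybox:stable
    command: ["sleep", "3600"]
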
I0917 00:23:50.384717 579049 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
W0917 00:23:51.262614 579049 addons.go:461] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
W0917 00:23:51.262722 579049 out.go:285] ! Enabling 'inspektor-gadget' returned an error: running callbacks: [sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply --force -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: Process exited with status 1
stdout:
namespace/gadget unchanged
serviceaccount/gadget unchanged
configmap/gadget unchanged
clusterrole.rbac.authorization.k8s.io/gadget-cluster-role unchanged
clusterrolebinding.rbac.authorization.k8s.io/gadget-cluster-role-binding unchanged
role.rbac.authorization.k8s.io/gadget-role unchanged
rolebinding.rbac.authorization.k8s.io/gadget-role-binding unchanged
daemonset.apps/gadget configured
stderr:
error: error validating "/etc/kubernetes/addons/ig-crd.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
]
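
The validation failure above means a YAML document inside ig-crd.yaml reached kubectl without the two mandatory type fields; any document that carries content but omits them fails with exactly this `[apiVersion not set, kind not set]` message. Every object kubectl validates has to open with a header along these lines (the CRD name here is hypothetical, for illustration):

apiVersion: apiextensions.k8s.io/v1   # required on every document
kind: CustomResourceDefinition        # required on every document
metadata:
  name: traces.gadget.kinvolk.io      # hypothetical CRD name
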
I0917 00:23:51.267881 579049 out.go:179] * Enabled addons: registry-creds, nvidia-device-plugin, cloud-spanner, storage-provisioner, ingress-dns, amd-gpu-device-plugin, default-storageclass, volcano, metrics-server, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0917 00:23:51.271569 579049 addons.go:514] duration metric: took 1m31.741443043s for enable addons: enabled=[registry-creds nvidia-device-plugin cloud-spanner storage-provisioner ingress-dns amd-gpu-device-plugin default-storageclass volcano metrics-server yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0917 00:23:51.271626 579049 start.go:246] waiting for cluster config update ...
I0917 00:23:51.271648 579049 start.go:255] writing updated cluster config ...
I0917 00:23:51.271955 579049 ssh_runner.go:195] Run: rm -f paused
I0917 00:23:51.276590 579049 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0917 00:23:51.281629 579049 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-s7lnm" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.307466 579049 pod_ready.go:94] pod "coredns-66bc5c9577-s7lnm" is "Ready"
I0917 00:23:51.307495 579049 pod_ready.go:86] duration metric: took 25.844332ms for pod "coredns-66bc5c9577-s7lnm" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.312687 579049 pod_ready.go:83] waiting for pod "etcd-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.317890 579049 pod_ready.go:94] pod "etcd-addons-235235" is "Ready"
I0917 00:23:51.317973 579049 pod_ready.go:86] duration metric: took 5.198143ms for pod "etcd-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.320783 579049 pod_ready.go:83] waiting for pod "kube-apiserver-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.325836 579049 pod_ready.go:94] pod "kube-apiserver-addons-235235" is "Ready"
I0917 00:23:51.325916 579049 pod_ready.go:86] duration metric: took 5.053621ms for pod "kube-apiserver-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.328222 579049 pod_ready.go:83] waiting for pod "kube-controller-manager-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.680573 579049 pod_ready.go:94] pod "kube-controller-manager-addons-235235" is "Ready"
I0917 00:23:51.680603 579049 pod_ready.go:86] duration metric: took 352.316029ms for pod "kube-controller-manager-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:51.880693 579049 pod_ready.go:83] waiting for pod "kube-proxy-7ccvd" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:52.281104 579049 pod_ready.go:94] pod "kube-proxy-7ccvd" is "Ready"
I0917 00:23:52.281132 579049 pod_ready.go:86] duration metric: took 400.409037ms for pod "kube-proxy-7ccvd" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:52.480042 579049 pod_ready.go:83] waiting for pod "kube-scheduler-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:52.880499 579049 pod_ready.go:94] pod "kube-scheduler-addons-235235" is "Ready"
I0917 00:23:52.880528 579049 pod_ready.go:86] duration metric: took 400.460556ms for pod "kube-scheduler-addons-235235" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:23:52.880542 579049 pod_ready.go:40] duration metric: took 1.603920721s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
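
The pod_ready polling above reports on each pod's readiness, which a pod exposes through the standard Ready entry in status.conditions; presumably the checks read a status fragment like this illustrative one:

status:
  conditions:
  - type: Ready      # condition the waits above report on
    status: "True"
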
I0917 00:23:52.938822 579049 start.go:617] kubectl: 1.33.2, cluster: 1.34.0 (minor skew: 1)
I0917 00:23:52.941854 579049 out.go:179] * Done! kubectl is now configured to use "addons-235235" cluster and "default" namespace by default
==> Docker <==
Sep 17 00:26:18 addons-235235 dockerd[1178]: time="2025-09-17T00:26:18.421150810Z" level=info msg="ignoring event" container=8310810da55356feadee71e0fdd7606d87d39af34fa418cf2a110898a0bb1229 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 17 00:26:46 addons-235235 dockerd[1178]: time="2025-09-17T00:26:46.649330164Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:26:46 addons-235235 dockerd[1178]: time="2025-09-17T00:26:46.737763322Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:27:02 addons-235235 dockerd[1178]: time="2025-09-17T00:27:02.834303238Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:27:37 addons-235235 dockerd[1178]: time="2025-09-17T00:27:37.650197669Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:27:37 addons-235235 dockerd[1178]: time="2025-09-17T00:27:37.848639655Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:27:37 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:27:37Z" level=info msg="Stop pulling image docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: Pulling from library/busybox"
Sep 17 00:28:05 addons-235235 dockerd[1178]: time="2025-09-17T00:28:05.120881541Z" level=info msg="ignoring event" container=d3e79390835e0e0e8cae64267db2629eb221a2249ed2e8df729cb3781e22ad61 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 17 00:28:20 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:28:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b77675601378ff5c97c1767fba1e24aff7a14d87c69900baba5082df85dddfc7/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Sep 17 00:28:20 addons-235235 dockerd[1178]: time="2025-09-17T00:28:20.655165490Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:28:20 addons-235235 dockerd[1178]: time="2025-09-17T00:28:20.741495844Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:28:29 addons-235235 dockerd[1178]: time="2025-09-17T00:28:29.820977327Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:28:34 addons-235235 dockerd[1178]: time="2025-09-17T00:28:34.642739380Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:28:34 addons-235235 dockerd[1178]: time="2025-09-17T00:28:34.739117138Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:28:57 addons-235235 dockerd[1178]: time="2025-09-17T00:28:57.652065247Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:28:57 addons-235235 dockerd[1178]: time="2025-09-17T00:28:57.735030726Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:29:46 addons-235235 dockerd[1178]: time="2025-09-17T00:29:46.652301167Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:29:46 addons-235235 dockerd[1178]: time="2025-09-17T00:29:46.835696258Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:29:46 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:29:46Z" level=info msg="Stop pulling image docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79: Pulling from library/busybox"
Sep 17 00:30:20 addons-235235 dockerd[1178]: time="2025-09-17T00:30:20.707809978Z" level=info msg="ignoring event" container=b77675601378ff5c97c1767fba1e24aff7a14d87c69900baba5082df85dddfc7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 17 00:30:51 addons-235235 cri-dockerd[1478]: time="2025-09-17T00:30:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/80fc587f34c85f71988c3be8046cb4f607010d68fb7c68e94832bf4d5d59b4f6/resolv.conf as [nameserver 10.96.0.10 search local-path-storage.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Sep 17 00:30:51 addons-235235 dockerd[1178]: time="2025-09-17T00:30:51.215693201Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:30:51 addons-235235 dockerd[1178]: time="2025-09-17T00:30:51.307349823Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
Sep 17 00:31:03 addons-235235 dockerd[1178]: time="2025-09-17T00:31:03.649158464Z" level=warning msg="reference for unknown type: " digest="sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79" remote="docker.io/library/busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:31:03 addons-235235 dockerd[1178]: time="2025-09-17T00:31:03.740429266Z" level=error msg="Not continuing with pull after error" error="toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit"
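
The recurring `toomanyrequests` failures above are Docker Hub's cap on unauthenticated pulls; the busybox pulls by digest never complete because of it. One common mitigation is to pull with registry credentials via an imagePullSecrets reference, sketched here under the assumption that a docker-registry secret named dockerhub-creds was created beforehand (secret and pod names are hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: busybox-authenticated        # hypothetical name, for illustration only
spec:
  imagePullSecrets:
  - name: dockerhub-creds            # assumes `kubectl create secret docker-registry dockerhub-creds ...` ran first
  containers:
  - name: busybox
    image: busybox:stable
    command: ["sleep", "3600"]
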
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
fba2a89454255 nginx@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8 5 minutes ago Running nginx 0 63ebf5457695b nginx
e2c5ba2f8355f gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 6 minutes ago Running busybox 0 bc4b89ca8105d busybox
82404e257ef79 rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 7 minutes ago Running local-path-provisioner 0 ac4816af246bf local-path-provisioner-648f6765c9-lpgz2
cbc586ed79b42 ghcr.io/inspektor-gadget/inspektor-gadget@sha256:66fdf18cc8a577423b2a36b96a5be40fe690fdb986bfe7875f54edfa9c7d19a5 8 minutes ago Running gadget 0 bd72c74811a18 gadget-4bk2n
b279765d02101 ba04bb24b9575 8 minutes ago Running storage-provisioner 0 537bddb7fc1f8 storage-provisioner
994b418390c96 138784d87c9c5 8 minutes ago Running coredns 0 9d39a312841ad coredns-66bc5c9577-s7lnm
e39e9e3a27c04 6fc32d66c1411 8 minutes ago Running kube-proxy 0 20f8154ff37ed kube-proxy-7ccvd
c29889afefd1f 996be7e86d9b3 8 minutes ago Running kube-controller-manager 0 005f2c1a7c358 kube-controller-manager-addons-235235
3ba1c9fd171c9 a25f5ef9c34c3 8 minutes ago Running kube-scheduler 0 d7b768bb065c7 kube-scheduler-addons-235235
da4746efd3426 d291939e99406 8 minutes ago Running kube-apiserver 0 55660481e3eb0 kube-apiserver-addons-235235
afa22acfe31f9 a1894772a478e 8 minutes ago Running etcd 0 813d3a00d28e7 etcd-addons-235235
==> coredns [994b418390c9] <==
[INFO] 10.244.0.24:43663 - 23106 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000099444s
[INFO] 10.244.0.24:43663 - 53628 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.000927592s
[INFO] 10.244.0.24:44728 - 6127 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002355348s
[INFO] 10.244.0.24:43663 - 14310 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001372479s
[INFO] 10.244.0.24:44728 - 39738 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001929118s
[INFO] 10.244.0.24:44728 - 1608 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000105401s
[INFO] 10.244.0.24:43663 - 24802 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00009247s
[INFO] 10.244.0.24:35588 - 21373 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000222632s
[INFO] 10.244.0.24:60461 - 64061 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000114254s
[INFO] 10.244.0.24:60461 - 14808 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000106041s
[INFO] 10.244.0.24:60461 - 58271 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000099132s
[INFO] 10.244.0.24:60461 - 25338 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000099887s
[INFO] 10.244.0.24:60461 - 35311 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000105031s
[INFO] 10.244.0.24:60461 - 4918 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000100748s
[INFO] 10.244.0.24:35588 - 63902 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000112564s
[INFO] 10.244.0.24:35588 - 58247 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000122754s
[INFO] 10.244.0.24:60461 - 14588 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001645761s
[INFO] 10.244.0.24:35588 - 64085 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000096211s
[INFO] 10.244.0.24:35588 - 61625 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000116206s
[INFO] 10.244.0.24:35588 - 3142 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000081606s
[INFO] 10.244.0.24:60461 - 861 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001721116s
[INFO] 10.244.0.24:60461 - 41183 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000101241s
[INFO] 10.244.0.24:35588 - 29487 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001062259s
[INFO] 10.244.0.24:35588 - 58951 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001063219s
[INFO] 10.244.0.24:35588 - 25364 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000116518s
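
The NXDOMAIN bursts above are search-path expansion rather than real failures: with the ndots:5 resolv.conf rewritten earlier in the Docker log, a name such as hello-world-app.default.svc.cluster.local has fewer than five dots, so the resolver first appends each search domain (producing the .svc.cluster.local, .cluster.local and .us-east-2.compute.internal NXDOMAIN lookups) before the bare name answers NOERROR. A pod that mostly queries fully-qualified names can lower the threshold through dnsConfig; an illustrative sketch (pod name hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: dns-tuned                    # hypothetical name, for illustration only
spec:
  dnsConfig:
    options:
    - name: ndots
      value: "1"                     # names with at least one dot are tried as absolute first
  containers:
  - name: app
    image: busybox:stable
    command: ["sleep", "3600"]
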
==> describe nodes <==
Name: addons-235235
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-235235
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=addons-235235
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_17T00_22_14_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-235235
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 17 Sep 2025 00:22:11 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-235235
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:31:04 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:25:47 +0000 Wed, 17 Sep 2025 00:22:07 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:25:47 +0000 Wed, 17 Sep 2025 00:22:07 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:25:47 +0000 Wed, 17 Sep 2025 00:22:07 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:25:47 +0000 Wed, 17 Sep 2025 00:22:11 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-235235
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022300Ki
pods: 110
System Info:
Machine ID: b08b2949c1734d3d9463c78e2767d05b
System UUID: 992d6b4e-453a-4002-aab2-36cce8203392
Boot ID: 54a40c62-e2ca-4fe1-8de3-5249514e3fbf
Kernel Version: 5.15.0-1084-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (13 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m31s
default hello-world-app-5d498dc89-8ftcl 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m31s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m42s
gadget gadget-4bk2n 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m40s
kube-system coredns-66bc5c9577-s7lnm 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 8m46s
kube-system etcd-addons-235235 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 8m52s
kube-system kube-apiserver-addons-235235 250m (12%) 0 (0%) 0 (0%) 0 (0%) 8m52s
kube-system kube-controller-manager-addons-235235 200m (10%) 0 (0%) 0 (0%) 0 (0%) 8m52s
kube-system kube-proxy-7ccvd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m46s
kube-system kube-scheduler-addons-235235 100m (5%) 0 (0%) 0 (0%) 0 (0%) 8m52s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m38s
local-path-storage helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15s
local-path-storage local-path-provisioner-648f6765c9-lpgz2 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m38s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%) 0 (0%)
memory 170Mi (2%) 170Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 8m44s kube-proxy
Normal NodeHasSufficientMemory 8m59s (x8 over 8m59s) kubelet Node addons-235235 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m59s (x8 over 8m59s) kubelet Node addons-235235 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m59s (x7 over 8m59s) kubelet Node addons-235235 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 8m59s kubelet Updated Node Allocatable limit across pods
Normal Starting 8m52s kubelet Starting kubelet.
Warning CgroupV1 8m52s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 8m52s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 8m52s kubelet Node addons-235235 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m52s kubelet Node addons-235235 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m52s kubelet Node addons-235235 status is now: NodeHasSufficientPID
Normal RegisteredNode 8m47s node-controller Node addons-235235 event: Registered Node addons-235235 in Controller
==> dmesg <==
[Sep16 22:47] kauditd_printk_skb: 8 callbacks suppressed
[Sep17 00:20] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [afa22acfe31f] <==
{"level":"warn","ts":"2025-09-17T00:22:09.862673Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48544","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:09.883446Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48560","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:09.908885Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48572","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:09.933076Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48604","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:09.947149Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48612","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:09.976939Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48624","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:10.000993Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48644","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:10.028362Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48664","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:10.054661Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48674","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:10.068432Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48682","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:10.162261Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:48698","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:33.424386Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:43378","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:33.442190Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:43406","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.188356Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41564","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.275302Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41578","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.304728Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41596","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.372770Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41610","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.418026Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41638","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.451608Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41662","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.489918Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41666","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.523009Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41694","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.550961Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41714","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.606919Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41752","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.648329Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41756","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-17T00:22:48.688947Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:41778","server-name":"","error":"EOF"}
==> kernel <==
00:31:05 up 3:13, 0 users, load average: 0.54, 2.40, 3.17
Linux addons-235235 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [da4746efd342] <==
I0917 00:25:32.453975 1 controller.go:667] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0917 00:25:34.605239 1 alloc.go:328] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.106.235.163"}
I0917 00:26:02.165021 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:26:02.410726 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
I0917 00:26:04.058365 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0917 00:26:04.058414 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0917 00:26:04.117836 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0917 00:26:04.117879 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0917 00:26:04.199437 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0917 00:26:04.199501 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0917 00:26:04.315955 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0917 00:26:04.316153 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0917 00:26:04.389300 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0917 00:26:04.389340 1 handler.go:285] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0917 00:26:05.118236 1 cacher.go:182] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0917 00:26:05.389566 1 cacher.go:182] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
W0917 00:26:05.486218 1 cacher.go:182] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
I0917 00:26:20.062672 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:27:05.320870 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:27:25.932699 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:28:22.063797 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:28:52.615213 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:29:30.337032 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:30:06.780130 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:30:46.059351 1 stats.go:136] "Error getting keys" err="empty key: \"\""
==> kube-controller-manager [c29889afefd1] <==
E0917 00:30:06.198922 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:08.758062 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:08.759225 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:10.616644 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:10.617699 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:17.890021 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:17.891302 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:26.267407 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:26.268575 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:27.805363 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:27.806501 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:31.243089 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:31.244174 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:33.721970 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:33.723164 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:47.422934 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:47.424112 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:48.953425 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:48.954394 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:30:57.786179 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:30:57.787366 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:31:00.094533 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:31:00.100278 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
E0917 00:31:03.982717 1 reflector.go:422] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="the server could not find the requested resource"
E0917 00:31:03.983948 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" type="*v1.PartialObjectMetadata"
==> kube-proxy [e39e9e3a27c0] <==
I0917 00:22:20.738742 1 server_linux.go:53] "Using iptables proxy"
I0917 00:22:20.930177 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0917 00:22:21.030707 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0917 00:22:21.030776 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0917 00:22:21.030914 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0917 00:22:21.052890 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0917 00:22:21.052944 1 server_linux.go:132] "Using iptables Proxier"
I0917 00:22:21.059415 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0917 00:22:21.060002 1 server.go:527] "Version info" version="v1.34.0"
I0917 00:22:21.060027 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0917 00:22:21.066591 1 config.go:106] "Starting endpoint slice config controller"
I0917 00:22:21.066610 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0917 00:22:21.066875 1 config.go:200] "Starting service config controller"
I0917 00:22:21.066882 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0917 00:22:21.068286 1 config.go:403] "Starting serviceCIDR config controller"
I0917 00:22:21.068297 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0917 00:22:21.069263 1 config.go:309] "Starting node config controller"
I0917 00:22:21.069274 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0917 00:22:21.069280 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0917 00:22:21.166927 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0917 00:22:21.166966 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0917 00:22:21.169420 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [3ba1c9fd171c] <==
I0917 00:22:11.751477 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0917 00:22:11.753675 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0917 00:22:11.753707 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0917 00:22:11.754695 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I0917 00:22:11.754864 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E0917 00:22:11.756984 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_arm64.s:1223" type="*v1.ConfigMap"
E0917 00:22:11.765217 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0917 00:22:11.765491 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0917 00:22:11.765716 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0917 00:22:11.765893 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E0917 00:22:11.766045 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0917 00:22:11.766188 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0917 00:22:11.766367 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0917 00:22:11.766507 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0917 00:22:11.766674 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0917 00:22:11.766857 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0917 00:22:11.767028 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0917 00:22:11.767210 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0917 00:22:11.767366 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0917 00:22:11.767519 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0917 00:22:11.767626 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0917 00:22:11.767736 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E0917 00:22:11.767785 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E0917 00:22:11.767944 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
I0917 00:22:13.354770 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
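Note: the burst of "Failed to watch ... is forbidden" errors above is a startup race, not a persistent misconfiguration: the kube-scheduler's informers begin listing cluster-scoped resources before its RBAC bindings have been reconciled, and the "Caches are synced" line at 00:22:13 shows the condition cleared within about two seconds. A quick way to confirm the scheduler's permissions after startup (a sketch using kubectl impersonation against the same context this run uses):

  kubectl --context addons-235235 auth can-i list storageclasses.storage.k8s.io --as=system:kube-scheduler
  kubectl --context addons-235235 auth can-i watch nodes --as=system:kube-scheduler

Both commands should print "yes" once the system:kube-scheduler ClusterRoleBinding has been applied.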
==> kubelet <==
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.843623 2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1c771fa-70ab-40b4-aa3f-199a0039d79e-script" (OuterVolumeSpecName: "script") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "script". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.843688 2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c771fa-70ab-40b4-aa3f-199a0039d79e-data" (OuterVolumeSpecName: "data") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "data". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.847536 2320 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c771fa-70ab-40b4-aa3f-199a0039d79e-kube-api-access-fn25h" (OuterVolumeSpecName: "kube-api-access-fn25h") pod "d1c771fa-70ab-40b4-aa3f-199a0039d79e" (UID: "d1c771fa-70ab-40b4-aa3f-199a0039d79e"). InnerVolumeSpecName "kube-api-access-fn25h". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.943943 2320 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-fn25h\" (UniqueName: \"kubernetes.io/projected/d1c771fa-70ab-40b4-aa3f-199a0039d79e-kube-api-access-fn25h\") on node \"addons-235235\" DevicePath \"\""
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.943991 2320 reconciler_common.go:299] "Volume detached for volume \"script\" (UniqueName: \"kubernetes.io/configmap/d1c771fa-70ab-40b4-aa3f-199a0039d79e-script\") on node \"addons-235235\" DevicePath \"\""
Sep 17 00:30:20 addons-235235 kubelet[2320]: I0917 00:30:20.944001 2320 reconciler_common.go:299] "Volume detached for volume \"data\" (UniqueName: \"kubernetes.io/host-path/d1c771fa-70ab-40b4-aa3f-199a0039d79e-data\") on node \"addons-235235\" DevicePath \"\""
Sep 17 00:30:23 addons-235235 kubelet[2320]: I0917 00:30:23.605951 2320 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c771fa-70ab-40b4-aa3f-199a0039d79e" path="/var/lib/kubelet/pods/d1c771fa-70ab-40b4-aa3f-199a0039d79e/volumes"
Sep 17 00:30:24 addons-235235 kubelet[2320]: E0917 00:30:24.599468 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
Sep 17 00:30:36 addons-235235 kubelet[2320]: E0917 00:30:36.599303 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
Sep 17 00:30:49 addons-235235 kubelet[2320]: E0917 00:30:49.599574 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.753986 2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data\" (UniqueName: \"kubernetes.io/host-path/c840396e-7af2-418e-938e-f4879a2f827c-data\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.754038 2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs76q\" (UniqueName: \"kubernetes.io/projected/c840396e-7af2-418e-938e-f4879a2f827c-kube-api-access-qs76q\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
Sep 17 00:30:50 addons-235235 kubelet[2320]: I0917 00:30:50.754067 2320 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"script\" (UniqueName: \"kubernetes.io/configmap/c840396e-7af2-418e-938e-f4879a2f827c-script\") pod \"helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb\" (UID: \"c840396e-7af2-418e-938e-f4879a2f827c\") " pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb"
Sep 17 00:30:51 addons-235235 kubelet[2320]: I0917 00:30:51.103026 2320 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80fc587f34c85f71988c3be8046cb4f607010d68fb7c68e94832bf4d5d59b4f6"
Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310745 2320 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310800 2320 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310907 2320 kuberuntime_manager.go:1449] "Unhandled Error" err="container helper-pod start failed in pod helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb_local-path-storage(c840396e-7af2-418e-938e-f4879a2f827c): ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
Sep 17 00:30:51 addons-235235 kubelet[2320]: E0917 00:30:51.310946 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ErrImagePull: \"Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
Sep 17 00:30:52 addons-235235 kubelet[2320]: E0917 00:30:52.118753 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
Sep 17 00:30:53 addons-235235 kubelet[2320]: E0917 00:30:53.126667 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
Sep 17 00:31:00 addons-235235 kubelet[2320]: E0917 00:31:00.599529 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with ImagePullBackOff: \"Back-off pulling image \\\"docker.io/kicbase/echo-server:1.0\\\": ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="default/hello-world-app-5d498dc89-8ftcl" podUID="df51ee14-35c6-4bbe-9f2e-9e2363828d12"
Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744700 2320 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744766 2320 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" image="docker.io/busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79"
Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744853 2320 kuberuntime_manager.go:1449] "Unhandled Error" err="container helper-pod start failed in pod helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb_local-path-storage(c840396e-7af2-418e-938e-f4879a2f827c): ErrImagePull: Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit" logger="UnhandledError"
Sep 17 00:31:03 addons-235235 kubelet[2320]: E0917 00:31:03.744890 2320 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"helper-pod\" with ErrImagePull: \"Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit\"" pod="local-path-storage/helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" podUID="c840396e-7af2-418e-938e-f4879a2f827c"
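Note: every kubelet error in this block has the same root cause: unauthenticated pulls of docker.io/kicbase/echo-server:1.0 and docker.io/busybox:stable are rejected by Docker Hub's toomanyrequests rate limit, which then surfaces as ErrImagePull and ImagePullBackOff. One way to take the registry out of the test path is to pre-load the images into the minikube node so pods never pull at start; a minimal sketch, assuming the images are already pullable by the local Docker daemon:

  docker pull docker.io/kicbase/echo-server:1.0
  docker pull docker.io/busybox:stable
  minikube -p addons-235235 image load docker.io/kicbase/echo-server:1.0
  minikube -p addons-235235 image load docker.io/busybox:stable

Caveat: the helper pod pins busybox by digest (stable@sha256:3fbc63...), so a pre-loaded image only satisfies that reference if it matches the pinned digest.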
==> storage-provisioner [b279765d0210] <==
W0917 00:30:39.684502 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:41.687992 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:41.692845 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:43.695566 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:43.700392 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:45.703777 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:45.708694 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:47.712540 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:47.717255 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:49.720314 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:49.724602 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:51.728415 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:51.732730 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:53.735553 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:53.750218 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:55.819023 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:55.825986 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:57.830431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:57.835086 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:59.837939 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:30:59.842249 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:31:01.846579 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:31:01.852861 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:31:03.858323 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W0917 00:31:03.865096 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
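Note: the storage-provisioner block above is warning noise rather than a failure. The two-second cadence of the timestamps suggests a leader-election renew loop that still goes through the core v1 Endpoints API, and since v1.33 the API server stamps every such request with a deprecation warning pointing at discovery.k8s.io/v1 EndpointSlice. The warnings are expected until the provisioner moves off Endpoints; the equivalent reads against both APIs, for comparison:

  kubectl --context addons-235235 -n kube-system get endpoints
  kubectl --context addons-235235 -n kube-system get endpointslices.discovery.k8s.io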
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-235235 -n addons-235235
helpers_test.go:269: (dbg) Run: kubectl --context addons-235235 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb
helpers_test.go:282: ======> post-mortem[TestAddons/parallel/LocalPath]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb: exit status 1 (116.827786ms)
-- stdout --
Name: hello-world-app-5d498dc89-8ftcl
Namespace: default
Priority: 0
Service Account: default
Node: addons-235235/192.168.49.2
Start Time: Wed, 17 Sep 2025 00:25:34 +0000
Labels: app=hello-world-app
pod-template-hash=5d498dc89
Annotations: <none>
Status: Pending
IP: 10.244.0.33
IPs:
IP: 10.244.0.33
Controlled By: ReplicaSet/hello-world-app-5d498dc89
Containers:
hello-world-app:
Container ID:
Image: docker.io/kicbase/echo-server:1.0
Image ID:
Port: 8080/TCP
Host Port: 0/TCP
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-v5vm8 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
kube-api-access-v5vm8:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
Optional: false
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  5m32s                  default-scheduler  Successfully assigned default/hello-world-app-5d498dc89-8ftcl to addons-235235
  Warning  Failed     5m19s                  kubelet            Failed to pull image "docker.io/kicbase/echo-server:1.0": toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
  Normal   Pulling    2m37s (x5 over 5m31s)  kubelet            Pulling image "docker.io/kicbase/echo-server:1.0"
  Warning  Failed     2m37s (x4 over 5m31s)  kubelet            Failed to pull image "docker.io/kicbase/echo-server:1.0": Error response from daemon: toomanyrequests: You have reached your unauthenticated pull rate limit. https://www.docker.com/increase-rate-limit
  Warning  Failed     2m37s (x5 over 5m31s)  kubelet            Error: ErrImagePull
  Normal   BackOff    30s (x21 over 5m31s)   kubelet            Back-off pulling image "docker.io/kicbase/echo-server:1.0"
  Warning  Failed     30s (x21 over 5m31s)   kubelet            Error: ImagePullBackOff
Name: test-local-path
Namespace: default
Priority: 0
Service Account: default
Node: <none>
Labels: run=test-local-path
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Containers:
busybox:
Image: busybox:stable
Port: <none>
Host Port: <none>
Command:
sh
-c
echo 'local-path-provisioner' > /test/file1
Environment: <none>
Mounts:
/test from data (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-f7bg7 (ro)
Volumes:
data:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: test-pvc
ReadOnly: false
kube-api-access-f7bg7:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
Optional: false
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events: <none>
-- /stdout --
** stderr **
Error from server (NotFound): pods "helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb" not found
** /stderr **
helpers_test.go:287: kubectl --context addons-235235 describe pod hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb: exit status 1
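Note: the non-zero exit above is an artifact of the post-mortem itself, not a new failure. helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb is a short-lived helper that local-path-provisioner removes after its provisioning attempt, so by describe time it no longer exists, and kubectl describe exits 1 whenever any of the named pods is missing, even though the surviving pods were described successfully. A tolerant variant that describes each pod independently (a sketch using the pod names from this run):

  for p in hello-world-app-5d498dc89-8ftcl test-local-path helper-pod-create-pvc-6b6d1188-0b38-4324-abab-29964246affb; do
    kubectl --context addons-235235 describe pod "$p" || true
  done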
addons_test.go:1053: (dbg) Run: out/minikube-linux-arm64 -p addons-235235 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-arm64 -p addons-235235 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (42.809308777s)
--- FAIL: TestAddons/parallel/LocalPath (345.79s)
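Note: the 345.79s failure is environmental rather than a regression in the local-path addon: the helper pod could not pull busybox before the test gave up waiting on test-pvc, so the claim was never provisioned and test-local-path never left Pending. Authenticated pulls raise Docker Hub's rate limit; a minimal sketch, assuming credentials are available in DOCKER_USER and DOCKER_PASS (both names are placeholders):

  kubectl --context addons-235235 create secret docker-registry dockerhub-creds \
    --docker-username="$DOCKER_USER" --docker-password="$DOCKER_PASS"

Pods (or their service accounts) would then need an imagePullSecrets entry referencing dockerhub-creds; for CI, the simpler fix is usually mirroring the test images to a registry that is not rate-limited.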