=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-709593 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [bea346d9-0dca-482c-b9f9-7b71741b18d7] Pending
helpers_test.go:352: "busybox" [bea346d9-0dca-482c-b9f9-7b71741b18d7] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [bea346d9-0dca-482c-b9f9-7b71741b18d7] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.005092881s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-709593 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
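(To reproduce the failing check by hand against this profile, the same command the test runs can be repeated and the node's own limit compared alongside it; the profile name and binary path below are taken from this run and may differ elsewhere:
    kubectl --context old-k8s-version-709593 exec busybox -- /bin/sh -c "ulimit -n"
    out/minikube-linux-amd64 -p old-k8s-version-709593 ssh -- ulimit -n
The first command is the in-pod check from the test above; the second reads the open-file limit inside the minikube node for comparison.)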
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-709593
helpers_test.go:243: (dbg) docker inspect old-k8s-version-709593:
-- stdout --
[
{
"Id": "29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384",
"Created": "2025-11-23T09:56:47.666891207Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 294280,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T09:56:47.720935343Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/hostname",
"HostsPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/hosts",
"LogPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384-json.log",
"Name": "/old-k8s-version-709593",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-709593:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-709593",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384",
"LowerDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90-init/diff:/var/lib/docker/overlay2/c80a0dfdb81b7753b0a82e2bc6458805cbbad0a9ce5819c63e1d9b7b71ba226c/diff",
"MergedDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/merged",
"UpperDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/diff",
"WorkDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-709593",
"Source": "/var/lib/docker/volumes/old-k8s-version-709593/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-709593",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-709593",
"name.minikube.sigs.k8s.io": "old-k8s-version-709593",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "b544aba317fcf40d3e61edbec3240f39587be7e914d5c21fc69a6535b296b152",
"SandboxKey": "/var/run/docker/netns/b544aba317fc",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33093"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33094"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33097"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33095"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33096"
}
]
},
"Networks": {
"old-k8s-version-709593": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "4fa988beb7cda350f0c11b822dcc90801b7cc48baa23c5c851d275a8d3ed42f8",
"EndpointID": "da8f042fa74ebc4420b7404b4cac4144f9e37e8a91e96eb145a8c67dcfe76dd3",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"MacAddress": "76:bc:b6:48:41:0f",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-709593",
"29cb528aee84"
]
}
}
}
}
]
-- /stdout --
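(The container's explicit file-descriptor limits live under HostConfig.Ulimits in the inspect output above; the array is empty here, so the container falls back to the Docker daemon's default ulimits. Assuming the same profile name, just that field can be pulled with a format template:
    docker inspect -f '{{json .HostConfig.Ulimits}}' old-k8s-version-709593)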
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-709593 -n old-k8s-version-709593
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-709593 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-709593 logs -n 25: (1.332498622s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p bridge-676928 sudo systemctl cat kubelet --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo journalctl -xeu kubelet --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/kubernetes/kubelet.conf │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /var/lib/kubelet/config.yaml │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status docker --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat docker --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/docker/daemon.json │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo docker system info │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl status cri-docker --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat cri-docker --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo cat /usr/lib/systemd/system/cri-docker.service │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cri-dockerd --version │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status containerd --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl cat containerd --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /lib/systemd/system/containerd.service │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/containerd/config.toml │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo containerd config dump │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status crio --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat crio --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo crio config │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ delete │ -p bridge-676928 │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ delete │ -p disable-driver-mounts-178820 │ disable-driver-mounts-178820 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ start │ -p default-k8s-diff-port-696492 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ default-k8s-diff-port-696492 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 09:57:41
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 09:57:41.194019 311138 out.go:360] Setting OutFile to fd 1 ...
I1123 09:57:41.194298 311138 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 09:57:41.194308 311138 out.go:374] Setting ErrFile to fd 2...
I1123 09:57:41.194312 311138 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 09:57:41.194606 311138 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21968-3552/.minikube/bin
I1123 09:57:41.195144 311138 out.go:368] Setting JSON to false
I1123 09:57:41.196591 311138 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent","uptime":2400,"bootTime":1763889461,"procs":331,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 09:57:41.196668 311138 start.go:143] virtualization: kvm guest
I1123 09:57:41.199167 311138 out.go:179] * [default-k8s-diff-port-696492] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 09:57:41.201043 311138 out.go:179] - MINIKUBE_LOCATION=21968
I1123 09:57:41.201094 311138 notify.go:221] Checking for updates...
I1123 09:57:41.204382 311138 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 09:57:41.206017 311138 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21968-3552/kubeconfig
I1123 09:57:41.207959 311138 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21968-3552/.minikube
I1123 09:57:41.209794 311138 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 09:57:41.211809 311138 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 09:57:41.214009 311138 config.go:182] Loaded profile config "embed-certs-412583": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 09:57:41.214105 311138 config.go:182] Loaded profile config "no-preload-309734": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 09:57:41.214180 311138 config.go:182] Loaded profile config "old-k8s-version-709593": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 09:57:41.214271 311138 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 09:57:41.241306 311138 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1123 09:57:41.241474 311138 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 09:57:41.312013 311138 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-23 09:57:41.299959199 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map
[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 09:57:41.312116 311138 docker.go:319] overlay module found
I1123 09:57:41.314243 311138 out.go:179] * Using the docker driver based on user configuration
I1123 09:57:41.316002 311138 start.go:309] selected driver: docker
I1123 09:57:41.316024 311138 start.go:927] validating driver "docker" against <nil>
I1123 09:57:41.316037 311138 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 09:57:41.316751 311138 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 09:57:41.385595 311138 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-23 09:57:41.373759534 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map
[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 09:57:41.385794 311138 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 09:57:41.386023 311138 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:41.388087 311138 out.go:179] * Using Docker driver with root privileges
I1123 09:57:41.389651 311138 cni.go:84] Creating CNI manager for ""
I1123 09:57:41.389725 311138 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 09:57:41.389738 311138 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 09:57:41.389816 311138 start.go:353] cluster config:
{Name:default-k8s-diff-port-696492 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-696492 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:
cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath:
StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 09:57:41.391556 311138 out.go:179] * Starting "default-k8s-diff-port-696492" primary control-plane node in "default-k8s-diff-port-696492" cluster
I1123 09:57:41.392982 311138 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 09:57:41.394476 311138 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 09:57:41.395978 311138 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 09:57:41.396028 311138 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1123 09:57:41.396036 311138 cache.go:65] Caching tarball of preloaded images
I1123 09:57:41.396075 311138 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 09:57:41.396157 311138 preload.go:238] Found /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1123 09:57:41.396175 311138 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1123 09:57:41.396320 311138 profile.go:143] Saving config to /home/jenkins/minikube-integration/21968-3552/.minikube/profiles/default-k8s-diff-port-696492/config.json ...
I1123 09:57:41.396374 311138 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21968-3552/.minikube/profiles/default-k8s-diff-port-696492/config.json: {Name:mk3b81d8fd8561a54828649e3e510565221995b5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 09:57:41.422089 311138 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 09:57:41.422112 311138 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 09:57:41.422133 311138 cache.go:243] Successfully downloaded all kic artifacts
I1123 09:57:41.422177 311138 start.go:360] acquireMachinesLock for default-k8s-diff-port-696492: {Name:mkc8ee83ed2b7a995e355ddec223dfeea233bbf7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 09:57:41.422316 311138 start.go:364] duration metric: took 112.296µs to acquireMachinesLock for "default-k8s-diff-port-696492"
I1123 09:57:41.422500 311138 start.go:93] Provisioning new machine with config: &{Name:default-k8s-diff-port-696492 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-696492 Namespace:default API
ServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Disabl
eCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 09:57:41.422632 311138 start.go:125] createHost starting for "" (driver="docker")
W1123 09:57:37.251564 300017 node_ready.go:57] node "embed-certs-412583" has "Ready":"False" status (will retry)
W1123 09:57:39.751746 300017 node_ready.go:57] node "embed-certs-412583" has "Ready":"False" status (will retry)
I1123 09:57:42.255256 300017 node_ready.go:49] node "embed-certs-412583" is "Ready"
I1123 09:57:42.255291 300017 node_ready.go:38] duration metric: took 11.507766088s for node "embed-certs-412583" to be "Ready" ...
I1123 09:57:42.255310 300017 api_server.go:52] waiting for apiserver process to appear ...
I1123 09:57:42.255471 300017 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 09:57:42.277737 300017 api_server.go:72] duration metric: took 12.028046262s to wait for apiserver process to appear ...
I1123 09:57:42.277770 300017 api_server.go:88] waiting for apiserver healthz status ...
I1123 09:57:42.277792 300017 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1123 09:57:42.285468 300017 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1123 09:57:42.287274 300017 api_server.go:141] control plane version: v1.34.1
I1123 09:57:42.287395 300017 api_server.go:131] duration metric: took 9.61454ms to wait for apiserver health ...
I1123 09:57:42.287422 300017 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 09:57:42.294433 300017 system_pods.go:59] 8 kube-system pods found
I1123 09:57:42.294478 300017 system_pods.go:61] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.294486 300017 system_pods.go:61] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.294493 300017 system_pods.go:61] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.294499 300017 system_pods.go:61] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.294505 300017 system_pods.go:61] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.294510 300017 system_pods.go:61] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.294515 300017 system_pods.go:61] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.294526 300017 system_pods.go:61] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.294539 300017 system_pods.go:74] duration metric: took 7.098728ms to wait for pod list to return data ...
I1123 09:57:42.294549 300017 default_sa.go:34] waiting for default service account to be created ...
I1123 09:57:42.298321 300017 default_sa.go:45] found service account: "default"
I1123 09:57:42.298368 300017 default_sa.go:55] duration metric: took 3.811774ms for default service account to be created ...
I1123 09:57:42.298382 300017 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 09:57:42.302807 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.302871 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.302887 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.302896 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.302903 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.302927 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.302937 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.302943 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.302954 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.303049 300017 retry.go:31] will retry after 268.599682ms: missing components: kube-dns
I1123 09:57:42.577490 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.577531 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.577541 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.577550 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.577557 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.577563 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.577568 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.577573 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.577581 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.577600 300017 retry.go:31] will retry after 240.156475ms: missing components: kube-dns
I1123 09:57:42.822131 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.822171 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.822177 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.822182 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.822186 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.822190 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.822194 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.822197 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.822202 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.822216 300017 retry.go:31] will retry after 383.926777ms: missing components: kube-dns
I1123 09:57:43.211532 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:43.211575 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Running
I1123 09:57:43.211585 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:43.211592 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:43.211600 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:43.211608 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:43.211624 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:43.211635 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:43.211640 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Running
I1123 09:57:43.211650 300017 system_pods.go:126] duration metric: took 913.260942ms to wait for k8s-apps to be running ...
I1123 09:57:43.211661 300017 system_svc.go:44] waiting for kubelet service to be running ....
I1123 09:57:43.211722 300017 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 09:57:43.226055 300017 system_svc.go:56] duration metric: took 14.383207ms WaitForService to wait for kubelet
I1123 09:57:43.226087 300017 kubeadm.go:587] duration metric: took 12.976401428s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:43.226108 300017 node_conditions.go:102] verifying NodePressure condition ...
I1123 09:57:43.229492 300017 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 09:57:43.229524 300017 node_conditions.go:123] node cpu capacity is 8
I1123 09:57:43.229547 300017 node_conditions.go:105] duration metric: took 3.432669ms to run NodePressure ...
I1123 09:57:43.229560 300017 start.go:242] waiting for startup goroutines ...
I1123 09:57:43.229570 300017 start.go:247] waiting for cluster config update ...
I1123 09:57:43.229583 300017 start.go:256] writing updated cluster config ...
I1123 09:57:43.229975 300017 ssh_runner.go:195] Run: rm -f paused
I1123 09:57:43.235596 300017 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:43.243251 300017 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-8dgc7" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.248984 300017 pod_ready.go:94] pod "coredns-66bc5c9577-8dgc7" is "Ready"
I1123 09:57:43.249015 300017 pod_ready.go:86] duration metric: took 5.729453ms for pod "coredns-66bc5c9577-8dgc7" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.251635 300017 pod_ready.go:83] waiting for pod "etcd-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.256613 300017 pod_ready.go:94] pod "etcd-embed-certs-412583" is "Ready"
I1123 09:57:43.256645 300017 pod_ready.go:86] duration metric: took 4.984583ms for pod "etcd-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.259023 300017 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.264242 300017 pod_ready.go:94] pod "kube-apiserver-embed-certs-412583" is "Ready"
I1123 09:57:43.264273 300017 pod_ready.go:86] duration metric: took 5.223434ms for pod "kube-apiserver-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.311182 300017 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.642602 300017 pod_ready.go:94] pod "kube-controller-manager-embed-certs-412583" is "Ready"
I1123 09:57:43.642637 300017 pod_ready.go:86] duration metric: took 331.426321ms for pod "kube-controller-manager-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.843849 300017 pod_ready.go:83] waiting for pod "kube-proxy-wm7k2" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.244623 300017 pod_ready.go:94] pod "kube-proxy-wm7k2" is "Ready"
I1123 09:57:44.244667 300017 pod_ready.go:86] duration metric: took 400.77745ms for pod "kube-proxy-wm7k2" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.444056 300017 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.843963 300017 pod_ready.go:94] pod "kube-scheduler-embed-certs-412583" is "Ready"
I1123 09:57:44.843992 300017 pod_ready.go:86] duration metric: took 399.904179ms for pod "kube-scheduler-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.844006 300017 pod_ready.go:40] duration metric: took 1.608365258s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:44.891853 300017 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 09:57:44.964864 300017 out.go:179] * Done! kubectl is now configured to use "embed-certs-412583" cluster and "default" namespace by default
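(The start log above finishes with kubectl pointed at the new cluster; a quick sanity check of that context switch, using the profile name from this run, would be:
    kubectl config current-context
    kubectl --context embed-certs-412583 get nodes)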
W1123 09:57:41.488122 296642 node_ready.go:57] node "no-preload-309734" has "Ready":"False" status (will retry)
W1123 09:57:43.488201 296642 node_ready.go:57] node "no-preload-309734" has "Ready":"False" status (will retry)
I1123 09:57:43.988019 296642 node_ready.go:49] node "no-preload-309734" is "Ready"
I1123 09:57:43.988052 296642 node_ready.go:38] duration metric: took 14.003534589s for node "no-preload-309734" to be "Ready" ...
I1123 09:57:43.988069 296642 api_server.go:52] waiting for apiserver process to appear ...
I1123 09:57:43.988149 296642 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 09:57:44.008503 296642 api_server.go:72] duration metric: took 14.434117996s to wait for apiserver process to appear ...
I1123 09:57:44.008530 296642 api_server.go:88] waiting for apiserver healthz status ...
I1123 09:57:44.008551 296642 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1123 09:57:44.017109 296642 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1123 09:57:44.018176 296642 api_server.go:141] control plane version: v1.34.1
I1123 09:57:44.018200 296642 api_server.go:131] duration metric: took 9.663468ms to wait for apiserver health ...
I1123 09:57:44.018208 296642 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 09:57:44.022287 296642 system_pods.go:59] 8 kube-system pods found
I1123 09:57:44.022324 296642 system_pods.go:61] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.022351 296642 system_pods.go:61] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.022364 296642 system_pods.go:61] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.022369 296642 system_pods.go:61] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.022375 296642 system_pods.go:61] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.022381 296642 system_pods.go:61] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.022387 296642 system_pods.go:61] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.022397 296642 system_pods.go:61] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.022406 296642 system_pods.go:74] duration metric: took 4.191598ms to wait for pod list to return data ...
I1123 09:57:44.022421 296642 default_sa.go:34] waiting for default service account to be created ...
I1123 09:57:44.025262 296642 default_sa.go:45] found service account: "default"
I1123 09:57:44.025287 296642 default_sa.go:55] duration metric: took 2.858313ms for default service account to be created ...
I1123 09:57:44.025300 296642 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 09:57:44.028240 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.028269 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.028275 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.028281 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.028285 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.028289 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.028293 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.028296 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.028300 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.028346 296642 retry.go:31] will retry after 283.472429ms: missing components: kube-dns
I1123 09:57:44.317300 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.317353 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.317361 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.317370 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.317376 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.317382 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.317387 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.317391 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.317397 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.317416 296642 retry.go:31] will retry after 321.7427ms: missing components: kube-dns
I1123 09:57:44.689277 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.689322 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.689344 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.689353 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.689359 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.689366 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.689370 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.689375 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.689382 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.689411 296642 retry.go:31] will retry after 353.961831ms: missing components: kube-dns
I1123 09:57:45.048995 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:45.049060 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:45.049069 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:45.049078 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:45.049084 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:45.049090 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:45.049099 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:45.049104 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:45.049116 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:45.049135 296642 retry.go:31] will retry after 412.630882ms: missing components: kube-dns
I1123 09:57:45.607770 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:45.607816 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:45.607826 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:45.607836 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:45.607841 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:45.607847 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:45.607851 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:45.607856 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:45.607873 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:45.607891 296642 retry.go:31] will retry after 544.365573ms: missing components: kube-dns
I1123 09:57:41.425584 311138 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 09:57:41.425893 311138 start.go:159] libmachine.API.Create for "default-k8s-diff-port-696492" (driver="docker")
I1123 09:57:41.425945 311138 client.go:173] LocalClient.Create starting
I1123 09:57:41.426056 311138 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem
I1123 09:57:41.426100 311138 main.go:143] libmachine: Decoding PEM data...
I1123 09:57:41.426121 311138 main.go:143] libmachine: Parsing certificate...
I1123 09:57:41.426185 311138 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21968-3552/.minikube/certs/cert.pem
I1123 09:57:41.426208 311138 main.go:143] libmachine: Decoding PEM data...
I1123 09:57:41.426217 311138 main.go:143] libmachine: Parsing certificate...
I1123 09:57:41.426608 311138 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-696492 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 09:57:41.445568 311138 cli_runner.go:211] docker network inspect default-k8s-diff-port-696492 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 09:57:41.445670 311138 network_create.go:284] running [docker network inspect default-k8s-diff-port-696492] to gather additional debugging logs...
I1123 09:57:41.445697 311138 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-696492
W1123 09:57:41.465174 311138 cli_runner.go:211] docker network inspect default-k8s-diff-port-696492 returned with exit code 1
I1123 09:57:41.465216 311138 network_create.go:287] error running [docker network inspect default-k8s-diff-port-696492]: docker network inspect default-k8s-diff-port-696492: exit status 1
stdout:
[]
stderr:
Error response from daemon: network default-k8s-diff-port-696492 not found
I1123 09:57:41.465236 311138 network_create.go:289] output of [docker network inspect default-k8s-diff-port-696492]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network default-k8s-diff-port-696492 not found
** /stderr **
I1123 09:57:41.465403 311138 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 09:57:41.487255 311138 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-de5cba392bb4 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:ea:8d:f5:88:bc:8b} reservation:<nil>}
I1123 09:57:41.488105 311138 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-e2eabbe85d5b IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:da:f4:02:bd:23:31} reservation:<nil>}
I1123 09:57:41.489037 311138 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-22e47e96d08e IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:da:9e:83:f9:9f:f6} reservation:<nil>}
I1123 09:57:41.489614 311138 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-4fa988beb7cd IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:1a:18:12:be:77:f6} reservation:<nil>}
I1123 09:57:41.492079 311138 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001d80820}
I1123 09:57:41.492121 311138 network_create.go:124] attempt to create docker network default-k8s-diff-port-696492 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1123 09:57:41.492171 311138 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 default-k8s-diff-port-696492
I1123 09:57:41.554538 311138 network_create.go:108] docker network default-k8s-diff-port-696492 192.168.85.0/24 created
I1123 09:57:41.554588 311138 kic.go:121] calculated static IP "192.168.85.2" for the "default-k8s-diff-port-696492" container
I1123 09:57:41.554664 311138 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 09:57:41.575522 311138 cli_runner.go:164] Run: docker volume create default-k8s-diff-port-696492 --label name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --label created_by.minikube.sigs.k8s.io=true
I1123 09:57:41.598058 311138 oci.go:103] Successfully created a docker volume default-k8s-diff-port-696492
I1123 09:57:41.598141 311138 cli_runner.go:164] Run: docker run --rm --name default-k8s-diff-port-696492-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --entrypoint /usr/bin/test -v default-k8s-diff-port-696492:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 09:57:42.041176 311138 oci.go:107] Successfully prepared a docker volume default-k8s-diff-port-696492
I1123 09:57:42.041254 311138 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 09:57:42.041269 311138 kic.go:194] Starting extracting preloaded images to volume ...
I1123 09:57:42.041325 311138 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-696492:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1123 09:57:46.265821 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:46.265851 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Running
I1123 09:57:46.265856 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:46.265860 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:46.265863 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:46.265868 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:46.265870 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:46.265875 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:46.265879 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Running
I1123 09:57:46.265889 296642 system_pods.go:126] duration metric: took 2.240582653s to wait for k8s-apps to be running ...
I1123 09:57:46.265903 296642 system_svc.go:44] waiting for kubelet service to be running ....
I1123 09:57:46.265972 296642 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 09:57:46.283075 296642 system_svc.go:56] duration metric: took 17.161056ms WaitForService to wait for kubelet
I1123 09:57:46.283105 296642 kubeadm.go:587] duration metric: took 16.70872571s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:46.283128 296642 node_conditions.go:102] verifying NodePressure condition ...
I1123 09:57:46.491444 296642 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 09:57:46.491473 296642 node_conditions.go:123] node cpu capacity is 8
I1123 09:57:46.491486 296642 node_conditions.go:105] duration metric: took 208.353263ms to run NodePressure ...
I1123 09:57:46.491509 296642 start.go:242] waiting for startup goroutines ...
I1123 09:57:46.491520 296642 start.go:247] waiting for cluster config update ...
I1123 09:57:46.491533 296642 start.go:256] writing updated cluster config ...
I1123 09:57:46.491804 296642 ssh_runner.go:195] Run: rm -f paused
I1123 09:57:46.498152 296642 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:46.503240 296642 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-sx25q" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.508998 296642 pod_ready.go:94] pod "coredns-66bc5c9577-sx25q" is "Ready"
I1123 09:57:46.509028 296642 pod_ready.go:86] duration metric: took 5.757344ms for pod "coredns-66bc5c9577-sx25q" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.512072 296642 pod_ready.go:83] waiting for pod "etcd-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.517750 296642 pod_ready.go:94] pod "etcd-no-preload-309734" is "Ready"
I1123 09:57:46.517777 296642 pod_ready.go:86] duration metric: took 5.673234ms for pod "etcd-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.520446 296642 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.525480 296642 pod_ready.go:94] pod "kube-apiserver-no-preload-309734" is "Ready"
I1123 09:57:46.525513 296642 pod_ready.go:86] duration metric: took 5.036877ms for pod "kube-apiserver-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.528196 296642 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.902790 296642 pod_ready.go:94] pod "kube-controller-manager-no-preload-309734" is "Ready"
I1123 09:57:46.902815 296642 pod_ready.go:86] duration metric: took 374.588413ms for pod "kube-controller-manager-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.104263 296642 pod_ready.go:83] waiting for pod "kube-proxy-jpvhc" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.504876 296642 pod_ready.go:94] pod "kube-proxy-jpvhc" is "Ready"
I1123 09:57:47.504999 296642 pod_ready.go:86] duration metric: took 400.696383ms for pod "kube-proxy-jpvhc" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.706275 296642 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:48.104684 296642 pod_ready.go:94] pod "kube-scheduler-no-preload-309734" is "Ready"
I1123 09:57:48.104720 296642 pod_ready.go:86] duration metric: took 398.41369ms for pod "kube-scheduler-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:48.104739 296642 pod_ready.go:40] duration metric: took 1.606531718s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:48.181507 296642 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 09:57:48.183959 296642 out.go:179] * Done! kubectl is now configured to use "no-preload-309734" cluster and "default" namespace by default
==> container status <==
CONTAINER      IMAGE          CREATED          STATE    NAME                     ATTEMPT  POD ID         POD                                              NAMESPACE
8d7f40f8f4e07  56cc512116c8f  8 seconds ago    Running  busybox                  0        fef27a1a4d0d4  busybox                                          default
d15093524dcf0  ead0a4a53df89  14 seconds ago   Running  coredns                  0        1410c58ee49e1  coredns-5dd5756b68-gf5sx                         kube-system
6188a0a11a558  6e38f40d628db  14 seconds ago   Running  storage-provisioner      0        d10f215129879  storage-provisioner                              kube-system
a1af83bb67492  409467f978b4a  25 seconds ago   Running  kindnet-cni              0        0d60321491712  kindnet-tpvt2                                    kube-system
e82a6fec044de  ea1030da44aa1  28 seconds ago   Running  kube-proxy               0        11e7ed694601b  kube-proxy-sgv48                                 kube-system
1b2964c416267  4be79c38a4bab  50 seconds ago   Running  kube-controller-manager  0        2cc4143ea8b90  kube-controller-manager-old-k8s-version-709593   kube-system
33f6ed017ec88  f6f496300a2ae  50 seconds ago   Running  kube-scheduler           0        11295be3c0583  kube-scheduler-old-k8s-version-709593            kube-system
9ab267968c030  bb5e0dde9054c  50 seconds ago   Running  kube-apiserver           0        86d19ce97a6b1  kube-apiserver-old-k8s-version-709593            kube-system
d4c298d1c8060  73deb9a3f7025  50 seconds ago   Running  etcd                     0        2f9ec40d5f287  etcd-old-k8s-version-709593                      kube-system
==> containerd <==
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.420590122Z" level=info msg="CreateContainer within sandbox \"d10f2151298793071f334a433fb6cfce4b8b35c05f27a6d4e58960cedbf96462\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.421304727Z" level=info msg="StartContainer for \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.423036667Z" level=info msg="connecting to shim 6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc" address="unix:///run/containerd/s/1f0be7d26635bbcb41f6c32b3d2f1385a50ecbc1dec74ce6548e85610e0cefc1" protocol=ttrpc version=3
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.423927224Z" level=info msg="CreateContainer within sandbox \"1410c58ee49e106f41592b5e6ae663765165c9b234249dacefc4e2eccebfec08\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.424701663Z" level=info msg="StartContainer for \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.425764608Z" level=info msg="connecting to shim d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f" address="unix:///run/containerd/s/fe12e30014183b4c11ebd3e6acfbe97fc1992c631d1626cb13faef4fe4d22ee6" protocol=ttrpc version=3
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.488919409Z" level=info msg="StartContainer for \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\" returns successfully"
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.489532054Z" level=info msg="StartContainer for \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\" returns successfully"
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.817959050Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bea346d9-0dca-482c-b9f9-7b71741b18d7,Namespace:default,Attempt:0,}"
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.866021477Z" level=info msg="connecting to shim fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615" address="unix:///run/containerd/s/f66c8e58b533a67c21226ca176913c77f22823731a0ac223ff958c8fefe43b11" namespace=k8s.io protocol=ttrpc version=3
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.950965400Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bea346d9-0dca-482c-b9f9-7b71741b18d7,Namespace:default,Attempt:0,} returns sandbox id \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\""
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.953294596Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.223204984Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.224183979Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396648"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.226078502Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.228512955Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.229002948Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.275384117s"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.229045171Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.230910353Z" level=info msg="CreateContainer within sandbox \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.242585175Z" level=info msg="Container 8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4: CDI devices from CRI Config.CDIDevices: []"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.253136286Z" level=info msg="CreateContainer within sandbox \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.253869141Z" level=info msg="StartContainer for \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.258087383Z" level=info msg="connecting to shim 8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4" address="unix:///run/containerd/s/f66c8e58b533a67c21226ca176913c77f22823731a0ac223ff958c8fefe43b11" protocol=ttrpc version=3
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.328511725Z" level=info msg="StartContainer for \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\" returns successfully"
Nov 23 09:57:47 old-k8s-version-709593 containerd[660]: E1123 09:57:47.651496 660 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:34931 - 60518 "HINFO IN 7244376839273605299.5052886007572092194. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.04020687s
==> describe nodes <==
Name: old-k8s-version-709593
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-709593
kubernetes.io/os=linux
minikube.k8s.io/commit=37270640e5bc1cd4189f05b508feb80c8debef53
minikube.k8s.io/name=old-k8s-version-709593
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T09_57_07_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 09:57:00 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-709593
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 09:57:47 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
----             ------  -----------------                 ------------------                ------                      -------
MemoryPressure   False   Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure     False   Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure      False   Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
Ready            True    Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:57:33 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-709593
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863360Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863360Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: 9e6f0832-18db-4c8d-86e4-20812ea439e5
Boot ID: e4c4d39b-bebd-4037-9237-26b945dbe084
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                             ------------  ----------  ---------------  -------------  ---
default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
kube-system  coredns-5dd5756b68-gf5sx                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     31s
kube-system  etcd-old-k8s-version-709593                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         43s
kube-system  kindnet-tpvt2                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      31s
kube-system  kube-apiserver-old-k8s-version-709593            250m (3%)     0 (0%)      0 (0%)           0 (0%)         46s
kube-system  kube-controller-manager-old-k8s-version-709593   200m (2%)     0 (0%)      0 (0%)           0 (0%)         45s
kube-system  kube-proxy-sgv48                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         31s
kube-system  kube-scheduler-old-k8s-version-709593            100m (1%)     0 (0%)      0 (0%)           0 (0%)         46s
kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                850m (10%)  100m (1%)
memory             220Mi (0%)  220Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type    Reason                   Age                From             Message
----    ------                   ----               ----             -------
Normal  Starting                 28s                kube-proxy
Normal  Starting                 52s                kubelet          Starting kubelet.
Normal  NodeHasSufficientMemory  52s (x8 over 52s)  kubelet          Node old-k8s-version-709593 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    52s (x8 over 52s)  kubelet          Node old-k8s-version-709593 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     52s (x7 over 52s)  kubelet          Node old-k8s-version-709593 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced  52s                kubelet          Updated Node Allocatable limit across pods
Normal  Starting                 43s                kubelet          Starting kubelet.
Normal  NodeAllocatableEnforced  43s                kubelet          Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory  43s                kubelet          Node old-k8s-version-709593 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    43s                kubelet          Node old-k8s-version-709593 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     43s                kubelet          Node old-k8s-version-709593 status is now: NodeHasSufficientPID
Normal  RegisteredNode           31s                node-controller  Node old-k8s-version-709593 event: Registered Node old-k8s-version-709593 in Controller
Normal  NodeReady                16s                kubelet          Node old-k8s-version-709593 status is now: NodeReady
==> dmesg <==
[ +6.288463] kauditd_printk_skb: 47 callbacks suppressed
[Nov23 09:55] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff ba 2b 39 eb 11 2b 08 06
[Nov23 09:56] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 8e bd c3 0c c1 99 08 06
[ +10.195562] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 5e 49 b3 20 41 43 08 06
[ +5.912917] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff f2 c0 1c 98 33 a9 08 06
[ +0.000437] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 8e bd c3 0c c1 99 08 06
[ +10.002091] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 1e 47 bd bf 96 57 08 06
[ +0.000405] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff ba 2b 39 eb 11 2b 08 06
[ +4.460318] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 3e 85 b9 91 f8 a4 08 06
[ +0.000372] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 5e 49 b3 20 41 43 08 06
[ +2.904694] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 9e 48 a2 4c da c6 08 06
[Nov23 09:57] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 76 48 bf 8b d1 fc 08 06
[ +0.000931] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 9e 48 a2 4c da c6 08 06
==> etcd [d4c298d1c8060139c5bb973acee87dc3fbc6b6454b9e3c8ebe9c6b86a2e5a7b8] <==
{"level":"info","ts":"2025-11-23T09:56:58.59753Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-23T09:56:58.597864Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.597974Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.598004Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.599014Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"warn","ts":"2025-11-23T09:57:01.971736Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"124.487229ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424543 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:monitoring\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:monitoring\" value_size:573 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:01.971868Z","caller":"traceutil/trace.go:171","msg":"trace[1367842110] transaction","detail":"{read_only:false; response_revision:112; number_of_response:1; }","duration":"185.333295ms","start":"2025-11-23T09:57:01.786515Z","end":"2025-11-23T09:57:01.971849Z","steps":["trace[1367842110] 'process raft request' (duration: 59.969834ms)","trace[1367842110] 'compare' (duration: 124.335128ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.204167Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"132.409698ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424553 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/view\" value_size:673 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.204261Z","caller":"traceutil/trace.go:171","msg":"trace[1142240257] transaction","detail":"{read_only:false; response_revision:117; number_of_response:1; }","duration":"141.084345ms","start":"2025-11-23T09:57:02.063163Z","end":"2025-11-23T09:57:02.204247Z","steps":["trace[1142240257] 'compare' (duration: 132.298203ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T09:57:02.49574Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"128.58211ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424557 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-edit\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-edit\" value_size:1957 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.495841Z","caller":"traceutil/trace.go:171","msg":"trace[1763507131] transaction","detail":"{read_only:false; response_revision:119; number_of_response:1; }","duration":"249.990542ms","start":"2025-11-23T09:57:02.245837Z","end":"2025-11-23T09:57:02.495828Z","steps":["trace[1763507131] 'process raft request' (duration: 121.258106ms)","trace[1763507131] 'compare' (duration: 128.446744ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.811736Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"142.743867ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424559 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-view\" value_size:1862 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.811827Z","caller":"traceutil/trace.go:171","msg":"trace[334752838] linearizableReadLoop","detail":"{readStateIndex:125; appliedIndex:124; }","duration":"197.624876ms","start":"2025-11-23T09:57:02.614187Z","end":"2025-11-23T09:57:02.811812Z","steps":["trace[334752838] 'read index received' (duration: 54.776357ms)","trace[334752838] 'applied index is now lower than readState.Index' (duration: 142.846972ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:02.811874Z","caller":"traceutil/trace.go:171","msg":"trace[577911190] transaction","detail":"{read_only:false; response_revision:120; number_of_response:1; }","duration":"309.546043ms","start":"2025-11-23T09:57:02.502295Z","end":"2025-11-23T09:57:02.811841Z","steps":["trace[577911190] 'process raft request' (duration: 166.630437ms)","trace[577911190] 'compare' (duration: 142.557878ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.811926Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"197.752655ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-23T09:57:02.811961Z","caller":"traceutil/trace.go:171","msg":"trace[450821894] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:120; }","duration":"197.79258ms","start":"2025-11-23T09:57:02.614154Z","end":"2025-11-23T09:57:02.811947Z","steps":["trace[450821894] 'agreement among raft nodes before linearized reading' (duration: 197.694344ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T09:57:02.812003Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-23T09:57:02.50227Z","time spent":"309.683301ms","remote":"127.0.0.1:39468","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":1917,"response count":0,"response size":37,"request content":"compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-view\" value_size:1862 >> failure:<>"}
{"level":"warn","ts":"2025-11-23T09:57:03.126521Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"185.304764ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424563 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:heapster\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:heapster\" value_size:579 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:03.126599Z","caller":"traceutil/trace.go:171","msg":"trace[1403684060] transaction","detail":"{read_only:false; response_revision:121; number_of_response:1; }","duration":"309.884743ms","start":"2025-11-23T09:57:02.816704Z","end":"2025-11-23T09:57:03.126589Z","steps":["trace[1403684060] 'process raft request' (duration: 124.45761ms)","trace[1403684060] 'compare' (duration: 185.120538ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:03.126635Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-23T09:57:02.816683Z","time spent":"309.941015ms","remote":"127.0.0.1:39468","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":625,"response count":0,"response size":37,"request content":"compare:<target:MOD key:\"/registry/clusterroles/system:heapster\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:heapster\" value_size:579 >> failure:<>"}
{"level":"warn","ts":"2025-11-23T09:57:03.378154Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"117.573425ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424567 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:node-problem-detector\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:node-problem-detector\" value_size:583 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:03.37825Z","caller":"traceutil/trace.go:171","msg":"trace[407529311] transaction","detail":"{read_only:false; response_revision:123; number_of_response:1; }","duration":"236.959494ms","start":"2025-11-23T09:57:03.141275Z","end":"2025-11-23T09:57:03.378235Z","steps":["trace[407529311] 'process raft request' (duration: 119.236514ms)","trace[407529311] 'compare' (duration: 117.440472ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:03.488901Z","caller":"traceutil/trace.go:171","msg":"trace[331049729] transaction","detail":"{read_only:false; response_revision:124; number_of_response:1; }","duration":"105.829119ms","start":"2025-11-23T09:57:03.38305Z","end":"2025-11-23T09:57:03.488879Z","steps":["trace[331049729] 'process raft request' (duration: 105.359949ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T09:57:03.685992Z","caller":"traceutil/trace.go:171","msg":"trace[1238052414] transaction","detail":"{read_only:false; response_revision:127; number_of_response:1; }","duration":"180.587913ms","start":"2025-11-23T09:57:03.505382Z","end":"2025-11-23T09:57:03.68597Z","steps":["trace[1238052414] 'process raft request' (duration: 128.699733ms)","trace[1238052414] 'compare' (duration: 51.773911ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:44.684831Z","caller":"traceutil/trace.go:171","msg":"trace[671402052] transaction","detail":"{read_only:false; response_revision:477; number_of_response:1; }","duration":"110.153636ms","start":"2025-11-23T09:57:44.574655Z","end":"2025-11-23T09:57:44.684809Z","steps":["trace[671402052] 'process raft request' (duration: 110.003906ms)"],"step_count":1}
==> kernel <==
09:57:49 up 40 min, 0 user, load average: 5.55, 4.20, 2.64
Linux old-k8s-version-709593 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [a1af83bb6749287f8df2adaeff4c43c5820f5194cb24f7fe3eb5ef134893d93c] <==
I1123 09:57:23.601786 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 09:57:23.602109 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1123 09:57:23.602284 1 main.go:148] setting mtu 1500 for CNI
I1123 09:57:23.602304 1 main.go:178] kindnetd IP family: "ipv4"
I1123 09:57:23.602318 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T09:57:23Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 09:57:23.855098 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 09:57:23.855140 1 controller.go:381] "Waiting for informer caches to sync"
I1123 09:57:23.855154 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 09:57:23.900801 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 09:57:24.355697 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 09:57:24.355735 1 metrics.go:72] Registering metrics
I1123 09:57:24.355844 1 controller.go:711] "Syncing nftables rules"
I1123 09:57:33.855972 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 09:57:33.856030 1 main.go:301] handling current node
I1123 09:57:43.856054 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 09:57:43.856111 1 main.go:301] handling current node
==> kube-apiserver [9ab267968c030e0a3bce6b123e59cf0e26705c3742842d1fe84461463f48a663] <==
I1123 09:57:00.606586 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1123 09:57:00.606625 1 aggregator.go:166] initial CRD sync complete...
I1123 09:57:00.606634 1 autoregister_controller.go:141] Starting autoregister controller
I1123 09:57:00.606641 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 09:57:00.606650 1 cache.go:39] Caches are synced for autoregister controller
I1123 09:57:00.608306 1 controller.go:624] quota admission added evaluator for: namespaces
I1123 09:57:00.609050 1 shared_informer.go:318] Caches are synced for configmaps
I1123 09:57:00.624076 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 09:57:00.649174 1 shared_informer.go:318] Caches are synced for node_authorizer
I1123 09:57:01.610779 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 09:57:01.702685 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 09:57:01.702703 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 09:57:04.338662 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 09:57:04.416324 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 09:57:04.524354 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 09:57:04.538023 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1123 09:57:04.540122 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 09:57:04.546988 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 09:57:04.575545 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 09:57:05.959109 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 09:57:05.975157 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 09:57:05.986661 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 09:57:17.926455 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1123 09:57:18.460236 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
E1123 09:57:47.744877 1 upgradeaware.go:425] Error proxying data from client to backend: write tcp 192.168.76.2:47470->192.168.76.2:10250: write: connection reset by peer
==> kube-controller-manager [1b2964c41626762d3beb765fa131cc83c8eafa60068157afab3d1e775a761750] <==
I1123 09:57:18.051120 1 shared_informer.go:318] Caches are synced for resource quota
I1123 09:57:18.052924 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-709593" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1123 09:57:18.132109 1 shared_informer.go:318] Caches are synced for attach detach
I1123 09:57:18.349828 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-tndwj"
I1123 09:57:18.372449 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-gf5sx"
I1123 09:57:18.406026 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="452.070013ms"
I1123 09:57:18.463224 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="57.127396ms"
I1123 09:57:18.483794 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-sgv48"
I1123 09:57:18.483871 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 09:57:18.504473 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tpvt2"
I1123 09:57:18.560131 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="95.571025ms"
I1123 09:57:18.560538 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="230.617µs"
I1123 09:57:18.562358 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 09:57:18.562385 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 09:57:19.789485 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 09:57:19.808843 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-tndwj"
I1123 09:57:19.823673 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="40.107806ms"
I1123 09:57:19.833064 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.315043ms"
I1123 09:57:19.833185 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="76.73µs"
I1123 09:57:33.949212 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="110.096µs"
I1123 09:57:33.981566 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="90.706µs"
I1123 09:57:35.176726 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="136.892µs"
I1123 09:57:35.214616 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.894482ms"
I1123 09:57:35.214767 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.972µs"
I1123 09:57:38.010283 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [e82a6fec044de994c043f2f9c5656e0c2a71e8e480ed8f7cca948de66ed51059] <==
I1123 09:57:20.277594 1 server_others.go:69] "Using iptables proxy"
I1123 09:57:20.292272 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1123 09:57:20.339595 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 09:57:20.344426 1 server_others.go:152] "Using iptables Proxier"
I1123 09:57:20.344681 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 09:57:20.344815 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 09:57:20.344909 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 09:57:20.345726 1 server.go:846] "Version info" version="v1.28.0"
I1123 09:57:20.345900 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 09:57:20.347106 1 config.go:188] "Starting service config controller"
I1123 09:57:20.350153 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 09:57:20.349625 1 config.go:97] "Starting endpoint slice config controller"
I1123 09:57:20.350452 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 09:57:20.350106 1 config.go:315] "Starting node config controller"
I1123 09:57:20.350583 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 09:57:20.450547 1 shared_informer.go:318] Caches are synced for service config
I1123 09:57:20.450714 1 shared_informer.go:318] Caches are synced for node config
I1123 09:57:20.450744 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [33f6ed017ec882589a089aad6a009c657f1fc80298864259b48138233e264c91] <==
W1123 09:57:01.700971 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 09:57:01.701017 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 09:57:01.704770 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:01.704814 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:01.752559 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:01.752596 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:01.981985 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 09:57:01.982024 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 09:57:01.983872 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 09:57:01.983905 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 09:57:02.057453 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:02.057498 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:02.144948 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1123 09:57:02.145025 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1123 09:57:03.483078 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1123 09:57:03.483126 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1123 09:57:03.561961 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:03.562012 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:03.808694 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 09:57:03.808744 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 09:57:03.860531 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 09:57:03.860576 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 09:57:03.972432 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1123 09:57:03.972478 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
I1123 09:57:04.567087 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: W1123 09:57:18.547160 1519 reflector.go:535] object-"kube-system"/"kube-proxy": failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-709593" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-709593' and this object
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: E1123 09:57:18.547223 1519 reflector.go:147] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-709593" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-709593' and this object
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709145 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz9pq\" (UniqueName: \"kubernetes.io/projected/f5d963bd-a2f2-44d2-969c-d219c55aba33-kube-api-access-dz9pq\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709218 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-cni-cfg\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709250 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-xtables-lock\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709281 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6p4v\" (UniqueName: \"kubernetes.io/projected/fd3daece-c28b-4efa-ae53-16c16790e5be-kube-api-access-c6p4v\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709316 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f5d963bd-a2f2-44d2-969c-d219c55aba33-lib-modules\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709389 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-lib-modules\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709422 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f5d963bd-a2f2-44d2-969c-d219c55aba33-kube-proxy\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709454 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f5d963bd-a2f2-44d2-969c-d219c55aba33-xtables-lock\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:24 old-k8s-version-709593 kubelet[1519]: I1123 09:57:24.152873 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-sgv48" podStartSLOduration=6.152803535 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:21.2206232 +0000 UTC m=+15.292351138" watchObservedRunningTime="2025-11-23 09:57:24.152803535 +0000 UTC m=+18.224531435"
Nov 23 09:57:24 old-k8s-version-709593 kubelet[1519]: I1123 09:57:24.153064 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tpvt2" podStartSLOduration=2.534840269 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="2025-11-23 09:57:19.547788823 +0000 UTC m=+13.619516716" lastFinishedPulling="2025-11-23 09:57:23.165974087 +0000 UTC m=+17.237701980" observedRunningTime="2025-11-23 09:57:24.152485675 +0000 UTC m=+18.224213576" watchObservedRunningTime="2025-11-23 09:57:24.153025533 +0000 UTC m=+18.224753438"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.920548 1519 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.948876 1519 topology_manager.go:215] "Topology Admit Handler" podUID="9a493920-3739-4eb9-8426-3590a8f2ee51" podNamespace="kube-system" podName="coredns-5dd5756b68-gf5sx"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.949059 1519 topology_manager.go:215] "Topology Admit Handler" podUID="ba58926e-fdf3-4750-b44d-7c94a027737e" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123178 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-724lb\" (UniqueName: \"kubernetes.io/projected/ba58926e-fdf3-4750-b44d-7c94a027737e-kube-api-access-724lb\") pod \"storage-provisioner\" (UID: \"ba58926e-fdf3-4750-b44d-7c94a027737e\") " pod="kube-system/storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123243 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/ba58926e-fdf3-4750-b44d-7c94a027737e-tmp\") pod \"storage-provisioner\" (UID: \"ba58926e-fdf3-4750-b44d-7c94a027737e\") " pod="kube-system/storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123297 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rzx7\" (UniqueName: \"kubernetes.io/projected/9a493920-3739-4eb9-8426-3590a8f2ee51-kube-api-access-5rzx7\") pod \"coredns-5dd5756b68-gf5sx\" (UID: \"9a493920-3739-4eb9-8426-3590a8f2ee51\") " pod="kube-system/coredns-5dd5756b68-gf5sx"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123357 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a493920-3739-4eb9-8426-3590a8f2ee51-config-volume\") pod \"coredns-5dd5756b68-gf5sx\" (UID: \"9a493920-3739-4eb9-8426-3590a8f2ee51\") " pod="kube-system/coredns-5dd5756b68-gf5sx"
Nov 23 09:57:35 old-k8s-version-709593 kubelet[1519]: I1123 09:57:35.176230 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-gf5sx" podStartSLOduration=17.176168603 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:35.175754843 +0000 UTC m=+29.247482743" watchObservedRunningTime="2025-11-23 09:57:35.176168603 +0000 UTC m=+29.247896503"
Nov 23 09:57:35 old-k8s-version-709593 kubelet[1519]: I1123 09:57:35.204836 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.204788689 podCreationTimestamp="2025-11-23 09:57:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:35.19026469 +0000 UTC m=+29.261992589" watchObservedRunningTime="2025-11-23 09:57:35.204788689 +0000 UTC m=+29.276516592"
Nov 23 09:57:37 old-k8s-version-709593 kubelet[1519]: I1123 09:57:37.507262 1519 topology_manager.go:215] "Topology Admit Handler" podUID="bea346d9-0dca-482c-b9f9-7b71741b18d7" podNamespace="default" podName="busybox"
Nov 23 09:57:37 old-k8s-version-709593 kubelet[1519]: I1123 09:57:37.646410 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj5kg\" (UniqueName: \"kubernetes.io/projected/bea346d9-0dca-482c-b9f9-7b71741b18d7-kube-api-access-pj5kg\") pod \"busybox\" (UID: \"bea346d9-0dca-482c-b9f9-7b71741b18d7\") " pod="default/busybox"
Nov 23 09:57:41 old-k8s-version-709593 kubelet[1519]: I1123 09:57:41.192410 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.9155870259999999 podCreationTimestamp="2025-11-23 09:57:37 +0000 UTC" firstStartedPulling="2025-11-23 09:57:37.952685082 +0000 UTC m=+32.024412966" lastFinishedPulling="2025-11-23 09:57:40.229447793 +0000 UTC m=+34.301175679" observedRunningTime="2025-11-23 09:57:41.192028507 +0000 UTC m=+35.263756408" watchObservedRunningTime="2025-11-23 09:57:41.192349739 +0000 UTC m=+35.264077634"
Nov 23 09:57:47 old-k8s-version-709593 kubelet[1519]: E1123 09:57:47.744109 1519 upgradeaware.go:425] Error proxying data from client to backend: readfrom tcp 192.168.76.2:34062->192.168.76.2:10010: write tcp 192.168.76.2:34062->192.168.76.2:10010: write: broken pipe
==> storage-provisioner [6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc] <==
I1123 09:57:34.497639 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 09:57:34.510426 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 09:57:34.510517 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 09:57:34.519430 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 09:57:34.519625 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f!
I1123 09:57:34.522696 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"89d02a34-1ced-4051-82ca-0198f46f6d6a", APIVersion:"v1", ResourceVersion:"448", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f became leader
I1123 09:57:34.619835 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-709593 -n old-k8s-version-709593
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-709593 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-709593
helpers_test.go:243: (dbg) docker inspect old-k8s-version-709593:
-- stdout --
[
{
"Id": "29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384",
"Created": "2025-11-23T09:56:47.666891207Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 294280,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T09:56:47.720935343Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/hostname",
"HostsPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/hosts",
"LogPath": "/var/lib/docker/containers/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384/29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384-json.log",
"Name": "/old-k8s-version-709593",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-709593:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-709593",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "29cb528aee84df4277faf7afff19daffc07e3b9a021296ff004f8b42489e8384",
"LowerDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90-init/diff:/var/lib/docker/overlay2/c80a0dfdb81b7753b0a82e2bc6458805cbbad0a9ce5819c63e1d9b7b71ba226c/diff",
"MergedDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/merged",
"UpperDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/diff",
"WorkDir": "/var/lib/docker/overlay2/ea62ac2e144b45f2284ed569ef537390326f82b0cb3d40e4d46e0ff286b7eb90/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-709593",
"Source": "/var/lib/docker/volumes/old-k8s-version-709593/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-709593",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-709593",
"name.minikube.sigs.k8s.io": "old-k8s-version-709593",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "b544aba317fcf40d3e61edbec3240f39587be7e914d5c21fc69a6535b296b152",
"SandboxKey": "/var/run/docker/netns/b544aba317fc",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33093"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33094"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33097"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33095"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33096"
}
]
},
"Networks": {
"old-k8s-version-709593": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "4fa988beb7cda350f0c11b822dcc90801b7cc48baa23c5c851d275a8d3ed42f8",
"EndpointID": "da8f042fa74ebc4420b7404b4cac4144f9e37e8a91e96eb145a8c67dcfe76dd3",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"MacAddress": "76:bc:b6:48:41:0f",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-709593",
"29cb528aee84"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-709593 -n old-k8s-version-709593
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-709593 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-709593 logs -n 25: (1.209220437s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p bridge-676928 sudo systemctl cat kubelet --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo journalctl -xeu kubelet --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/kubernetes/kubelet.conf │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /var/lib/kubelet/config.yaml │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status docker --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat docker --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/docker/daemon.json │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo docker system info │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl status cri-docker --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat cri-docker --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo cat /usr/lib/systemd/system/cri-docker.service │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cri-dockerd --version │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status containerd --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl cat containerd --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /lib/systemd/system/containerd.service │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo cat /etc/containerd/config.toml │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo containerd config dump │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo systemctl status crio --all --full --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
│ ssh │ -p bridge-676928 sudo systemctl cat crio --no-pager │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ ssh │ -p bridge-676928 sudo crio config │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ delete │ -p bridge-676928 │ bridge-676928 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ delete │ -p disable-driver-mounts-178820 │ disable-driver-mounts-178820 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ 23 Nov 25 09:57 UTC │
│ start │ -p default-k8s-diff-port-696492 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ default-k8s-diff-port-696492 │ jenkins │ v1.37.0 │ 23 Nov 25 09:57 UTC │ │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 09:57:41
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 09:57:41.194019 311138 out.go:360] Setting OutFile to fd 1 ...
I1123 09:57:41.194298 311138 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 09:57:41.194308 311138 out.go:374] Setting ErrFile to fd 2...
I1123 09:57:41.194312 311138 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 09:57:41.194606 311138 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21968-3552/.minikube/bin
I1123 09:57:41.195144 311138 out.go:368] Setting JSON to false
I1123 09:57:41.196591 311138 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent","uptime":2400,"bootTime":1763889461,"procs":331,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 09:57:41.196668 311138 start.go:143] virtualization: kvm guest
I1123 09:57:41.199167 311138 out.go:179] * [default-k8s-diff-port-696492] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 09:57:41.201043 311138 out.go:179] - MINIKUBE_LOCATION=21968
I1123 09:57:41.201094 311138 notify.go:221] Checking for updates...
I1123 09:57:41.204382 311138 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 09:57:41.206017 311138 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21968-3552/kubeconfig
I1123 09:57:41.207959 311138 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21968-3552/.minikube
I1123 09:57:41.209794 311138 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 09:57:41.211809 311138 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 09:57:41.214009 311138 config.go:182] Loaded profile config "embed-certs-412583": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 09:57:41.214105 311138 config.go:182] Loaded profile config "no-preload-309734": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 09:57:41.214180 311138 config.go:182] Loaded profile config "old-k8s-version-709593": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 09:57:41.214271 311138 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 09:57:41.241306 311138 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1123 09:57:41.241474 311138 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 09:57:41.312013 311138 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-23 09:57:41.299959199 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map
[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 09:57:41.312116 311138 docker.go:319] overlay module found
I1123 09:57:41.314243 311138 out.go:179] * Using the docker driver based on user configuration
I1123 09:57:41.316002 311138 start.go:309] selected driver: docker
I1123 09:57:41.316024 311138 start.go:927] validating driver "docker" against <nil>
I1123 09:57:41.316037 311138 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 09:57:41.316751 311138 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 09:57:41.385595 311138 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-23 09:57:41.373759534 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8 ::1/128] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652080640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map
[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 09:57:41.385794 311138 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 09:57:41.386023 311138 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:41.388087 311138 out.go:179] * Using Docker driver with root privileges
I1123 09:57:41.389651 311138 cni.go:84] Creating CNI manager for ""
I1123 09:57:41.389725 311138 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 09:57:41.389738 311138 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 09:57:41.389816 311138 start.go:353] cluster config:
{Name:default-k8s-diff-port-696492 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-696492 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:
cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath:
StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 09:57:41.391556 311138 out.go:179] * Starting "default-k8s-diff-port-696492" primary control-plane node in "default-k8s-diff-port-696492" cluster
I1123 09:57:41.392982 311138 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 09:57:41.394476 311138 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 09:57:41.395978 311138 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 09:57:41.396028 311138 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1123 09:57:41.396036 311138 cache.go:65] Caching tarball of preloaded images
I1123 09:57:41.396075 311138 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 09:57:41.396157 311138 preload.go:238] Found /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1123 09:57:41.396175 311138 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1123 09:57:41.396320 311138 profile.go:143] Saving config to /home/jenkins/minikube-integration/21968-3552/.minikube/profiles/default-k8s-diff-port-696492/config.json ...
I1123 09:57:41.396374 311138 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21968-3552/.minikube/profiles/default-k8s-diff-port-696492/config.json: {Name:mk3b81d8fd8561a54828649e3e510565221995b5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 09:57:41.422089 311138 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 09:57:41.422112 311138 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 09:57:41.422133 311138 cache.go:243] Successfully downloaded all kic artifacts
I1123 09:57:41.422177 311138 start.go:360] acquireMachinesLock for default-k8s-diff-port-696492: {Name:mkc8ee83ed2b7a995e355ddec223dfeea233bbf7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 09:57:41.422316 311138 start.go:364] duration metric: took 112.296µs to acquireMachinesLock for "default-k8s-diff-port-696492"
I1123 09:57:41.422500 311138 start.go:93] Provisioning new machine with config: &{Name:default-k8s-diff-port-696492 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-696492 Namespace:default API
ServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Disabl
eCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 09:57:41.422632 311138 start.go:125] createHost starting for "" (driver="docker")
W1123 09:57:37.251564 300017 node_ready.go:57] node "embed-certs-412583" has "Ready":"False" status (will retry)
W1123 09:57:39.751746 300017 node_ready.go:57] node "embed-certs-412583" has "Ready":"False" status (will retry)
I1123 09:57:42.255256 300017 node_ready.go:49] node "embed-certs-412583" is "Ready"
I1123 09:57:42.255291 300017 node_ready.go:38] duration metric: took 11.507766088s for node "embed-certs-412583" to be "Ready" ...
I1123 09:57:42.255310 300017 api_server.go:52] waiting for apiserver process to appear ...
I1123 09:57:42.255471 300017 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 09:57:42.277737 300017 api_server.go:72] duration metric: took 12.028046262s to wait for apiserver process to appear ...
I1123 09:57:42.277770 300017 api_server.go:88] waiting for apiserver healthz status ...
I1123 09:57:42.277792 300017 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1123 09:57:42.285468 300017 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1123 09:57:42.287274 300017 api_server.go:141] control plane version: v1.34.1
I1123 09:57:42.287395 300017 api_server.go:131] duration metric: took 9.61454ms to wait for apiserver health ...
I1123 09:57:42.287422 300017 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 09:57:42.294433 300017 system_pods.go:59] 8 kube-system pods found
I1123 09:57:42.294478 300017 system_pods.go:61] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.294486 300017 system_pods.go:61] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.294493 300017 system_pods.go:61] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.294499 300017 system_pods.go:61] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.294505 300017 system_pods.go:61] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.294510 300017 system_pods.go:61] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.294515 300017 system_pods.go:61] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.294526 300017 system_pods.go:61] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.294539 300017 system_pods.go:74] duration metric: took 7.098728ms to wait for pod list to return data ...
I1123 09:57:42.294549 300017 default_sa.go:34] waiting for default service account to be created ...
I1123 09:57:42.298321 300017 default_sa.go:45] found service account: "default"
I1123 09:57:42.298368 300017 default_sa.go:55] duration metric: took 3.811774ms for default service account to be created ...
I1123 09:57:42.298382 300017 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 09:57:42.302807 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.302871 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.302887 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.302896 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.302903 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.302927 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.302937 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.302943 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.302954 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.303049 300017 retry.go:31] will retry after 268.599682ms: missing components: kube-dns
I1123 09:57:42.577490 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.577531 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.577541 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.577550 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.577557 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.577563 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.577568 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.577573 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.577581 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.577600 300017 retry.go:31] will retry after 240.156475ms: missing components: kube-dns
I1123 09:57:42.822131 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:42.822171 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:42.822177 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:42.822182 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:42.822186 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:42.822190 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:42.822194 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:42.822197 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:42.822202 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:42.822216 300017 retry.go:31] will retry after 383.926777ms: missing components: kube-dns
I1123 09:57:43.211532 300017 system_pods.go:86] 8 kube-system pods found
I1123 09:57:43.211575 300017 system_pods.go:89] "coredns-66bc5c9577-8dgc7" [f685cc03-30df-4119-9d66-0e808c2d3c93] Running
I1123 09:57:43.211585 300017 system_pods.go:89] "etcd-embed-certs-412583" [ea8b65e6-8c1f-4dda-8902-6b6be242b01f] Running
I1123 09:57:43.211592 300017 system_pods.go:89] "kindnet-f76c2" [16967e76-b4bf-4a99-aab9-d7f76cbb0830] Running
I1123 09:57:43.211600 300017 system_pods.go:89] "kube-apiserver-embed-certs-412583" [7eee3d42-8f6d-4f15-8eb6-d6cb611f8904] Running
I1123 09:57:43.211608 300017 system_pods.go:89] "kube-controller-manager-embed-certs-412583" [e118b0d0-9dad-4c49-beb5-fa7d32814216] Running
I1123 09:57:43.211624 300017 system_pods.go:89] "kube-proxy-wm7k2" [120a9b03-e7bf-4f4d-9b8c-6fa05d3739d9] Running
I1123 09:57:43.211635 300017 system_pods.go:89] "kube-scheduler-embed-certs-412583" [dde2c2e0-b58a-4028-a671-1a8f577dd063] Running
I1123 09:57:43.211640 300017 system_pods.go:89] "storage-provisioner" [dcf16920-e30b-42ab-8195-4ef946498d0f] Running
I1123 09:57:43.211650 300017 system_pods.go:126] duration metric: took 913.260942ms to wait for k8s-apps to be running ...
I1123 09:57:43.211661 300017 system_svc.go:44] waiting for kubelet service to be running ....
I1123 09:57:43.211722 300017 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 09:57:43.226055 300017 system_svc.go:56] duration metric: took 14.383207ms WaitForService to wait for kubelet
I1123 09:57:43.226087 300017 kubeadm.go:587] duration metric: took 12.976401428s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:43.226108 300017 node_conditions.go:102] verifying NodePressure condition ...
I1123 09:57:43.229492 300017 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 09:57:43.229524 300017 node_conditions.go:123] node cpu capacity is 8
I1123 09:57:43.229547 300017 node_conditions.go:105] duration metric: took 3.432669ms to run NodePressure ...
I1123 09:57:43.229560 300017 start.go:242] waiting for startup goroutines ...
I1123 09:57:43.229570 300017 start.go:247] waiting for cluster config update ...
I1123 09:57:43.229583 300017 start.go:256] writing updated cluster config ...
I1123 09:57:43.229975 300017 ssh_runner.go:195] Run: rm -f paused
I1123 09:57:43.235596 300017 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:43.243251 300017 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-8dgc7" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.248984 300017 pod_ready.go:94] pod "coredns-66bc5c9577-8dgc7" is "Ready"
I1123 09:57:43.249015 300017 pod_ready.go:86] duration metric: took 5.729453ms for pod "coredns-66bc5c9577-8dgc7" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.251635 300017 pod_ready.go:83] waiting for pod "etcd-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.256613 300017 pod_ready.go:94] pod "etcd-embed-certs-412583" is "Ready"
I1123 09:57:43.256645 300017 pod_ready.go:86] duration metric: took 4.984583ms for pod "etcd-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.259023 300017 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.264242 300017 pod_ready.go:94] pod "kube-apiserver-embed-certs-412583" is "Ready"
I1123 09:57:43.264273 300017 pod_ready.go:86] duration metric: took 5.223434ms for pod "kube-apiserver-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.311182 300017 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.642602 300017 pod_ready.go:94] pod "kube-controller-manager-embed-certs-412583" is "Ready"
I1123 09:57:43.642637 300017 pod_ready.go:86] duration metric: took 331.426321ms for pod "kube-controller-manager-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:43.843849 300017 pod_ready.go:83] waiting for pod "kube-proxy-wm7k2" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.244623 300017 pod_ready.go:94] pod "kube-proxy-wm7k2" is "Ready"
I1123 09:57:44.244667 300017 pod_ready.go:86] duration metric: took 400.77745ms for pod "kube-proxy-wm7k2" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.444056 300017 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.843963 300017 pod_ready.go:94] pod "kube-scheduler-embed-certs-412583" is "Ready"
I1123 09:57:44.843992 300017 pod_ready.go:86] duration metric: took 399.904179ms for pod "kube-scheduler-embed-certs-412583" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:44.844006 300017 pod_ready.go:40] duration metric: took 1.608365258s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:44.891853 300017 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 09:57:44.964864 300017 out.go:179] * Done! kubectl is now configured to use "embed-certs-412583" cluster and "default" namespace by default
W1123 09:57:41.488122 296642 node_ready.go:57] node "no-preload-309734" has "Ready":"False" status (will retry)
W1123 09:57:43.488201 296642 node_ready.go:57] node "no-preload-309734" has "Ready":"False" status (will retry)
I1123 09:57:43.988019 296642 node_ready.go:49] node "no-preload-309734" is "Ready"
I1123 09:57:43.988052 296642 node_ready.go:38] duration metric: took 14.003534589s for node "no-preload-309734" to be "Ready" ...
I1123 09:57:43.988069 296642 api_server.go:52] waiting for apiserver process to appear ...
I1123 09:57:43.988149 296642 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 09:57:44.008503 296642 api_server.go:72] duration metric: took 14.434117996s to wait for apiserver process to appear ...
I1123 09:57:44.008530 296642 api_server.go:88] waiting for apiserver healthz status ...
I1123 09:57:44.008551 296642 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1123 09:57:44.017109 296642 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1123 09:57:44.018176 296642 api_server.go:141] control plane version: v1.34.1
I1123 09:57:44.018200 296642 api_server.go:131] duration metric: took 9.663468ms to wait for apiserver health ...
I1123 09:57:44.018208 296642 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 09:57:44.022287 296642 system_pods.go:59] 8 kube-system pods found
I1123 09:57:44.022324 296642 system_pods.go:61] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.022351 296642 system_pods.go:61] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.022364 296642 system_pods.go:61] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.022369 296642 system_pods.go:61] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.022375 296642 system_pods.go:61] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.022381 296642 system_pods.go:61] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.022387 296642 system_pods.go:61] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.022397 296642 system_pods.go:61] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.022406 296642 system_pods.go:74] duration metric: took 4.191598ms to wait for pod list to return data ...
I1123 09:57:44.022421 296642 default_sa.go:34] waiting for default service account to be created ...
I1123 09:57:44.025262 296642 default_sa.go:45] found service account: "default"
I1123 09:57:44.025287 296642 default_sa.go:55] duration metric: took 2.858313ms for default service account to be created ...
I1123 09:57:44.025300 296642 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 09:57:44.028240 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.028269 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.028275 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.028281 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.028285 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.028289 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.028293 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.028296 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.028300 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.028346 296642 retry.go:31] will retry after 283.472429ms: missing components: kube-dns
I1123 09:57:44.317300 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.317353 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.317361 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.317370 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.317376 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.317382 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.317387 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.317391 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.317397 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.317416 296642 retry.go:31] will retry after 321.7427ms: missing components: kube-dns
I1123 09:57:44.689277 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:44.689322 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:44.689344 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:44.689353 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:44.689359 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:44.689366 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:44.689370 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:44.689375 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:44.689382 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:44.689411 296642 retry.go:31] will retry after 353.961831ms: missing components: kube-dns
I1123 09:57:45.048995 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:45.049060 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:45.049069 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:45.049078 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:45.049084 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:45.049090 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:45.049099 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:45.049104 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:45.049116 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:45.049135 296642 retry.go:31] will retry after 412.630882ms: missing components: kube-dns
I1123 09:57:45.607770 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:45.607816 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 09:57:45.607826 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:45.607836 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:45.607841 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:45.607847 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:45.607851 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:45.607856 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:45.607873 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 09:57:45.607891 296642 retry.go:31] will retry after 544.365573ms: missing components: kube-dns
I1123 09:57:41.425584 311138 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 09:57:41.425893 311138 start.go:159] libmachine.API.Create for "default-k8s-diff-port-696492" (driver="docker")
I1123 09:57:41.425945 311138 client.go:173] LocalClient.Create starting
I1123 09:57:41.426056 311138 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem
I1123 09:57:41.426100 311138 main.go:143] libmachine: Decoding PEM data...
I1123 09:57:41.426121 311138 main.go:143] libmachine: Parsing certificate...
I1123 09:57:41.426185 311138 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21968-3552/.minikube/certs/cert.pem
I1123 09:57:41.426208 311138 main.go:143] libmachine: Decoding PEM data...
I1123 09:57:41.426217 311138 main.go:143] libmachine: Parsing certificate...
I1123 09:57:41.426608 311138 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-696492 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 09:57:41.445568 311138 cli_runner.go:211] docker network inspect default-k8s-diff-port-696492 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 09:57:41.445670 311138 network_create.go:284] running [docker network inspect default-k8s-diff-port-696492] to gather additional debugging logs...
I1123 09:57:41.445697 311138 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-696492
W1123 09:57:41.465174 311138 cli_runner.go:211] docker network inspect default-k8s-diff-port-696492 returned with exit code 1
I1123 09:57:41.465216 311138 network_create.go:287] error running [docker network inspect default-k8s-diff-port-696492]: docker network inspect default-k8s-diff-port-696492: exit status 1
stdout:
[]
stderr:
Error response from daemon: network default-k8s-diff-port-696492 not found
I1123 09:57:41.465236 311138 network_create.go:289] output of [docker network inspect default-k8s-diff-port-696492]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network default-k8s-diff-port-696492 not found
** /stderr **
I1123 09:57:41.465403 311138 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 09:57:41.487255 311138 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-de5cba392bb4 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:ea:8d:f5:88:bc:8b} reservation:<nil>}
I1123 09:57:41.488105 311138 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-e2eabbe85d5b IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:da:f4:02:bd:23:31} reservation:<nil>}
I1123 09:57:41.489037 311138 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-22e47e96d08e IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:da:9e:83:f9:9f:f6} reservation:<nil>}
I1123 09:57:41.489614 311138 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-4fa988beb7cd IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:1a:18:12:be:77:f6} reservation:<nil>}
I1123 09:57:41.492079 311138 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001d80820}
I1123 09:57:41.492121 311138 network_create.go:124] attempt to create docker network default-k8s-diff-port-696492 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1123 09:57:41.492171 311138 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 default-k8s-diff-port-696492
I1123 09:57:41.554538 311138 network_create.go:108] docker network default-k8s-diff-port-696492 192.168.85.0/24 created
I1123 09:57:41.554588 311138 kic.go:121] calculated static IP "192.168.85.2" for the "default-k8s-diff-port-696492" container
I1123 09:57:41.554664 311138 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 09:57:41.575522 311138 cli_runner.go:164] Run: docker volume create default-k8s-diff-port-696492 --label name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --label created_by.minikube.sigs.k8s.io=true
I1123 09:57:41.598058 311138 oci.go:103] Successfully created a docker volume default-k8s-diff-port-696492
I1123 09:57:41.598141 311138 cli_runner.go:164] Run: docker run --rm --name default-k8s-diff-port-696492-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --entrypoint /usr/bin/test -v default-k8s-diff-port-696492:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 09:57:42.041176 311138 oci.go:107] Successfully prepared a docker volume default-k8s-diff-port-696492
I1123 09:57:42.041254 311138 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 09:57:42.041269 311138 kic.go:194] Starting extracting preloaded images to volume ...
I1123 09:57:42.041325 311138 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-696492:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1123 09:57:46.265821 296642 system_pods.go:86] 8 kube-system pods found
I1123 09:57:46.265851 296642 system_pods.go:89] "coredns-66bc5c9577-sx25q" [50adb46a-6c29-465a-adba-f806eeef81aa] Running
I1123 09:57:46.265856 296642 system_pods.go:89] "etcd-no-preload-309734" [debda9ed-65d8-4a7e-99a0-42943a3c0520] Running
I1123 09:57:46.265860 296642 system_pods.go:89] "kindnet-d6zbp" [d1c56dde-7af0-49ca-a982-04ae56add5f9] Running
I1123 09:57:46.265863 296642 system_pods.go:89] "kube-apiserver-no-preload-309734" [165ccf5d-2d0c-4395-b9e8-31308c188f74] Running
I1123 09:57:46.265868 296642 system_pods.go:89] "kube-controller-manager-no-preload-309734" [d70022cf-2aaa-45a7-bcb0-0563bf832d88] Running
I1123 09:57:46.265870 296642 system_pods.go:89] "kube-proxy-jpvhc" [eb0ab966-23fc-429f-bcfe-eb5726b865be] Running
I1123 09:57:46.265875 296642 system_pods.go:89] "kube-scheduler-no-preload-309734" [c1fac6cc-06b9-419d-b9e5-e99b01de4dd2] Running
I1123 09:57:46.265879 296642 system_pods.go:89] "storage-provisioner" [b1352952-5fff-47aa-af05-dd6b2078fa39] Running
I1123 09:57:46.265889 296642 system_pods.go:126] duration metric: took 2.240582653s to wait for k8s-apps to be running ...
I1123 09:57:46.265903 296642 system_svc.go:44] waiting for kubelet service to be running ....
I1123 09:57:46.265972 296642 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 09:57:46.283075 296642 system_svc.go:56] duration metric: took 17.161056ms WaitForService to wait for kubelet
I1123 09:57:46.283105 296642 kubeadm.go:587] duration metric: took 16.70872571s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 09:57:46.283128 296642 node_conditions.go:102] verifying NodePressure condition ...
I1123 09:57:46.491444 296642 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 09:57:46.491473 296642 node_conditions.go:123] node cpu capacity is 8
I1123 09:57:46.491486 296642 node_conditions.go:105] duration metric: took 208.353263ms to run NodePressure ...
I1123 09:57:46.491509 296642 start.go:242] waiting for startup goroutines ...
I1123 09:57:46.491520 296642 start.go:247] waiting for cluster config update ...
I1123 09:57:46.491533 296642 start.go:256] writing updated cluster config ...
I1123 09:57:46.491804 296642 ssh_runner.go:195] Run: rm -f paused
I1123 09:57:46.498152 296642 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:46.503240 296642 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-sx25q" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.508998 296642 pod_ready.go:94] pod "coredns-66bc5c9577-sx25q" is "Ready"
I1123 09:57:46.509028 296642 pod_ready.go:86] duration metric: took 5.757344ms for pod "coredns-66bc5c9577-sx25q" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.512072 296642 pod_ready.go:83] waiting for pod "etcd-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.517750 296642 pod_ready.go:94] pod "etcd-no-preload-309734" is "Ready"
I1123 09:57:46.517777 296642 pod_ready.go:86] duration metric: took 5.673234ms for pod "etcd-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.520446 296642 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.525480 296642 pod_ready.go:94] pod "kube-apiserver-no-preload-309734" is "Ready"
I1123 09:57:46.525513 296642 pod_ready.go:86] duration metric: took 5.036877ms for pod "kube-apiserver-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.528196 296642 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:46.902790 296642 pod_ready.go:94] pod "kube-controller-manager-no-preload-309734" is "Ready"
I1123 09:57:46.902815 296642 pod_ready.go:86] duration metric: took 374.588413ms for pod "kube-controller-manager-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.104263 296642 pod_ready.go:83] waiting for pod "kube-proxy-jpvhc" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.504876 296642 pod_ready.go:94] pod "kube-proxy-jpvhc" is "Ready"
I1123 09:57:47.504999 296642 pod_ready.go:86] duration metric: took 400.696383ms for pod "kube-proxy-jpvhc" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:47.706275 296642 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:48.104684 296642 pod_ready.go:94] pod "kube-scheduler-no-preload-309734" is "Ready"
I1123 09:57:48.104720 296642 pod_ready.go:86] duration metric: took 398.41369ms for pod "kube-scheduler-no-preload-309734" in "kube-system" namespace to be "Ready" or be gone ...
I1123 09:57:48.104739 296642 pod_ready.go:40] duration metric: took 1.606531718s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 09:57:48.181507 296642 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 09:57:48.183959 296642 out.go:179] * Done! kubectl is now configured to use "no-preload-309734" cluster and "default" namespace by default
I1123 09:57:46.740944 311138 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21968-3552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-696492:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (4.699532205s)
I1123 09:57:46.741010 311138 kic.go:203] duration metric: took 4.699734046s to extract preloaded images to volume ...
W1123 09:57:46.741179 311138 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1123 09:57:46.741234 311138 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1123 09:57:46.741304 311138 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1123 09:57:46.807009 311138 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname default-k8s-diff-port-696492 --name default-k8s-diff-port-696492 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=default-k8s-diff-port-696492 --network default-k8s-diff-port-696492 --ip 192.168.85.2 --volume default-k8s-diff-port-696492:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8444 --publish=127.0.0.1::8444 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1123 09:57:47.199589 311138 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-696492 --format={{.State.Running}}
I1123 09:57:47.220655 311138 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-696492 --format={{.State.Status}}
I1123 09:57:47.242623 311138 cli_runner.go:164] Run: docker exec default-k8s-diff-port-696492 stat /var/lib/dpkg/alternatives/iptables
I1123 09:57:47.295743 311138 oci.go:144] the created container "default-k8s-diff-port-696492" has a running status.
I1123 09:57:47.295783 311138 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa...
I1123 09:57:47.562280 311138 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1123 09:57:47.611801 311138 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-696492 --format={{.State.Status}}
I1123 09:57:47.650055 311138 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1123 09:57:47.650078 311138 kic_runner.go:114] Args: [docker exec --privileged default-k8s-diff-port-696492 chown docker:docker /home/docker/.ssh/authorized_keys]
I1123 09:57:47.733580 311138 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-696492 --format={{.State.Status}}
I1123 09:57:47.763876 311138 machine.go:94] provisionDockerMachine start ...
I1123 09:57:47.763997 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:47.798484 311138 main.go:143] libmachine: Using SSH client type: native
I1123 09:57:47.798947 311138 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33108 <nil> <nil>}
I1123 09:57:47.798969 311138 main.go:143] libmachine: About to run SSH command:
hostname
I1123 09:57:47.966787 311138 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-696492
I1123 09:57:47.966822 311138 ubuntu.go:182] provisioning hostname "default-k8s-diff-port-696492"
I1123 09:57:47.966888 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:47.993804 311138 main.go:143] libmachine: Using SSH client type: native
I1123 09:57:47.994099 311138 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33108 <nil> <nil>}
I1123 09:57:47.994117 311138 main.go:143] libmachine: About to run SSH command:
sudo hostname default-k8s-diff-port-696492 && echo "default-k8s-diff-port-696492" | sudo tee /etc/hostname
I1123 09:57:48.174661 311138 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-696492
I1123 09:57:48.174752 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:48.203529 311138 main.go:143] libmachine: Using SSH client type: native
I1123 09:57:48.203843 311138 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33108 <nil> <nil>}
I1123 09:57:48.203881 311138 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdefault-k8s-diff-port-696492' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-696492/g' /etc/hosts;
else
echo '127.0.1.1 default-k8s-diff-port-696492' | sudo tee -a /etc/hosts;
fi
fi
I1123 09:57:48.379959 311138 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 09:57:48.380002 311138 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21968-3552/.minikube CaCertPath:/home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21968-3552/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21968-3552/.minikube}
I1123 09:57:48.380096 311138 ubuntu.go:190] setting up certificates
I1123 09:57:48.380127 311138 provision.go:84] configureAuth start
I1123 09:57:48.380222 311138 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-696492
I1123 09:57:48.421922 311138 provision.go:143] copyHostCerts
I1123 09:57:48.422045 311138 exec_runner.go:144] found /home/jenkins/minikube-integration/21968-3552/.minikube/key.pem, removing ...
I1123 09:57:48.422074 311138 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21968-3552/.minikube/key.pem
I1123 09:57:48.422196 311138 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21968-3552/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21968-3552/.minikube/key.pem (1679 bytes)
I1123 09:57:48.422353 311138 exec_runner.go:144] found /home/jenkins/minikube-integration/21968-3552/.minikube/ca.pem, removing ...
I1123 09:57:48.422365 311138 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21968-3552/.minikube/ca.pem
I1123 09:57:48.422399 311138 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21968-3552/.minikube/ca.pem (1082 bytes)
I1123 09:57:48.422467 311138 exec_runner.go:144] found /home/jenkins/minikube-integration/21968-3552/.minikube/cert.pem, removing ...
I1123 09:57:48.422523 311138 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21968-3552/.minikube/cert.pem
I1123 09:57:48.422566 311138 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21968-3552/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21968-3552/.minikube/cert.pem (1123 bytes)
I1123 09:57:48.422642 311138 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21968-3552/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-696492 san=[127.0.0.1 192.168.85.2 default-k8s-diff-port-696492 localhost minikube]
I1123 09:57:48.539621 311138 provision.go:177] copyRemoteCerts
I1123 09:57:48.539708 311138 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 09:57:48.539762 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:48.564284 311138 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33108 SSHKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa Username:docker}
I1123 09:57:48.677154 311138 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21968-3552/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 09:57:48.704807 311138 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21968-3552/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
I1123 09:57:48.730566 311138 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21968-3552/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 09:57:48.755362 311138 provision.go:87] duration metric: took 375.193527ms to configureAuth
I1123 09:57:48.755396 311138 ubuntu.go:206] setting minikube options for container-runtime
I1123 09:57:48.755732 311138 config.go:182] Loaded profile config "default-k8s-diff-port-696492": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 09:57:48.755752 311138 machine.go:97] duration metric: took 991.839554ms to provisionDockerMachine
I1123 09:57:48.755762 311138 client.go:176] duration metric: took 7.329805852s to LocalClient.Create
I1123 09:57:48.755786 311138 start.go:167] duration metric: took 7.329894759s to libmachine.API.Create "default-k8s-diff-port-696492"
I1123 09:57:48.755799 311138 start.go:293] postStartSetup for "default-k8s-diff-port-696492" (driver="docker")
I1123 09:57:48.755811 311138 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 09:57:48.755868 311138 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 09:57:48.755919 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:48.784317 311138 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33108 SSHKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa Username:docker}
I1123 09:57:48.901734 311138 ssh_runner.go:195] Run: cat /etc/os-release
I1123 09:57:48.906292 311138 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1123 09:57:48.906325 311138 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1123 09:57:48.906355 311138 filesync.go:126] Scanning /home/jenkins/minikube-integration/21968-3552/.minikube/addons for local assets ...
I1123 09:57:48.906577 311138 filesync.go:126] Scanning /home/jenkins/minikube-integration/21968-3552/.minikube/files for local assets ...
I1123 09:57:48.906715 311138 filesync.go:149] local asset: /home/jenkins/minikube-integration/21968-3552/.minikube/files/etc/ssl/certs/71092.pem -> 71092.pem in /etc/ssl/certs
I1123 09:57:48.906835 311138 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 09:57:48.917431 311138 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21968-3552/.minikube/files/etc/ssl/certs/71092.pem --> /etc/ssl/certs/71092.pem (1708 bytes)
I1123 09:57:48.947477 311138 start.go:296] duration metric: took 191.661634ms for postStartSetup
I1123 09:57:48.947957 311138 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-696492
I1123 09:57:48.973141 311138 profile.go:143] Saving config to /home/jenkins/minikube-integration/21968-3552/.minikube/profiles/default-k8s-diff-port-696492/config.json ...
I1123 09:57:48.973692 311138 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1123 09:57:48.973751 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:48.996029 311138 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33108 SSHKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa Username:docker}
I1123 09:57:49.106682 311138 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1123 09:57:49.112230 311138 start.go:128] duration metric: took 7.689569326s to createHost
I1123 09:57:49.112259 311138 start.go:83] releasing machines lock for "default-k8s-diff-port-696492", held for 7.689795634s
I1123 09:57:49.112351 311138 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-696492
I1123 09:57:49.135976 311138 ssh_runner.go:195] Run: cat /version.json
I1123 09:57:49.136033 311138 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 09:57:49.136042 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:49.136113 311138 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-696492
I1123 09:57:49.160077 311138 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33108 SSHKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa Username:docker}
I1123 09:57:49.161278 311138 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33108 SSHKeyPath:/home/jenkins/minikube-integration/21968-3552/.minikube/machines/default-k8s-diff-port-696492/id_rsa Username:docker}
I1123 09:57:49.264125 311138 ssh_runner.go:195] Run: systemctl --version
I1123 09:57:49.329282 311138 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 09:57:49.335197 311138 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 09:57:49.335268 311138 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 09:57:49.366357 311138 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 09:57:49.366380 311138 start.go:496] detecting cgroup driver to use...
I1123 09:57:49.366416 311138 detect.go:190] detected "systemd" cgroup driver on host os
I1123 09:57:49.366470 311138 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 09:57:49.383235 311138 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 09:57:49.399768 311138 docker.go:218] disabling cri-docker service (if available) ...
I1123 09:57:49.399842 311138 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1123 09:57:49.420125 311138 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1123 09:57:49.442300 311138 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1123 09:57:49.541498 311138 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1123 09:57:49.659194 311138 docker.go:234] disabling docker service ...
I1123 09:57:49.659272 311138 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1123 09:57:49.682070 311138 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1123 09:57:49.698015 311138 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1123 09:57:49.798105 311138 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1123 09:57:49.894575 311138 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 09:57:49.911733 311138 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 09:57:49.931314 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 09:57:49.945424 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 09:57:49.956889 311138 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1123 09:57:49.956953 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1123 09:57:49.967923 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 09:57:49.979575 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 09:57:49.991202 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 09:57:50.002918 311138 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 09:57:50.015086 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 09:57:50.027588 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 09:57:50.038500 311138 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 09:57:50.050508 311138 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 09:57:50.060907 311138 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 09:57:50.069882 311138 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 09:57:50.169936 311138 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 09:57:50.287676 311138 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1123 09:57:50.287747 311138 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1123 09:57:50.292388 311138 start.go:564] Will wait 60s for crictl version
I1123 09:57:50.292450 311138 ssh_runner.go:195] Run: which crictl
I1123 09:57:50.296873 311138 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1123 09:57:50.325533 311138 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1123 09:57:50.325605 311138 ssh_runner.go:195] Run: containerd --version
I1123 09:57:50.350974 311138 ssh_runner.go:195] Run: containerd --version
I1123 09:57:50.381808 311138 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                              NAMESPACE
8d7f40f8f4e07   56cc512116c8f   10 seconds ago   Running   busybox                   0         fef27a1a4d0d4   busybox                                          default
d15093524dcf0   ead0a4a53df89   16 seconds ago   Running   coredns                   0         1410c58ee49e1   coredns-5dd5756b68-gf5sx                         kube-system
6188a0a11a558   6e38f40d628db   16 seconds ago   Running   storage-provisioner       0         d10f215129879   storage-provisioner                              kube-system
a1af83bb67492   409467f978b4a   27 seconds ago   Running   kindnet-cni               0         0d60321491712   kindnet-tpvt2                                    kube-system
e82a6fec044de   ea1030da44aa1   31 seconds ago   Running   kube-proxy                0         11e7ed694601b   kube-proxy-sgv48                                 kube-system
1b2964c416267   4be79c38a4bab   52 seconds ago   Running   kube-controller-manager   0         2cc4143ea8b90   kube-controller-manager-old-k8s-version-709593   kube-system
33f6ed017ec88   f6f496300a2ae   52 seconds ago   Running   kube-scheduler            0         11295be3c0583   kube-scheduler-old-k8s-version-709593            kube-system
9ab267968c030   bb5e0dde9054c   52 seconds ago   Running   kube-apiserver            0         86d19ce97a6b1   kube-apiserver-old-k8s-version-709593            kube-system
d4c298d1c8060   73deb9a3f7025   52 seconds ago   Running   etcd                      0         2f9ec40d5f287   etcd-old-k8s-version-709593                      kube-system
==> containerd <==
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.420590122Z" level=info msg="CreateContainer within sandbox \"d10f2151298793071f334a433fb6cfce4b8b35c05f27a6d4e58960cedbf96462\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.421304727Z" level=info msg="StartContainer for \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.423036667Z" level=info msg="connecting to shim 6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc" address="unix:///run/containerd/s/1f0be7d26635bbcb41f6c32b3d2f1385a50ecbc1dec74ce6548e85610e0cefc1" protocol=ttrpc version=3
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.423927224Z" level=info msg="CreateContainer within sandbox \"1410c58ee49e106f41592b5e6ae663765165c9b234249dacefc4e2eccebfec08\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.424701663Z" level=info msg="StartContainer for \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\""
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.425764608Z" level=info msg="connecting to shim d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f" address="unix:///run/containerd/s/fe12e30014183b4c11ebd3e6acfbe97fc1992c631d1626cb13faef4fe4d22ee6" protocol=ttrpc version=3
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.488919409Z" level=info msg="StartContainer for \"d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f\" returns successfully"
Nov 23 09:57:34 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:34.489532054Z" level=info msg="StartContainer for \"6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc\" returns successfully"
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.817959050Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bea346d9-0dca-482c-b9f9-7b71741b18d7,Namespace:default,Attempt:0,}"
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.866021477Z" level=info msg="connecting to shim fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615" address="unix:///run/containerd/s/f66c8e58b533a67c21226ca176913c77f22823731a0ac223ff958c8fefe43b11" namespace=k8s.io protocol=ttrpc version=3
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.950965400Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bea346d9-0dca-482c-b9f9-7b71741b18d7,Namespace:default,Attempt:0,} returns sandbox id \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\""
Nov 23 09:57:37 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:37.953294596Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.223204984Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.224183979Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396648"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.226078502Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.228512955Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.229002948Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.275384117s"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.229045171Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.230910353Z" level=info msg="CreateContainer within sandbox \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.242585175Z" level=info msg="Container 8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4: CDI devices from CRI Config.CDIDevices: []"
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.253136286Z" level=info msg="CreateContainer within sandbox \"fef27a1a4d0d4d0fd89a702b88e4f10a3d0f81a41d5a766dcd38d6273f063615\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.253869141Z" level=info msg="StartContainer for \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\""
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.258087383Z" level=info msg="connecting to shim 8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4" address="unix:///run/containerd/s/f66c8e58b533a67c21226ca176913c77f22823731a0ac223ff958c8fefe43b11" protocol=ttrpc version=3
Nov 23 09:57:40 old-k8s-version-709593 containerd[660]: time="2025-11-23T09:57:40.328511725Z" level=info msg="StartContainer for \"8d7f40f8f4e0763efe28dd2b910dd945b4ad8925953ca7a945bf4566509889f4\" returns successfully"
Nov 23 09:57:47 old-k8s-version-709593 containerd[660]: E1123 09:57:47.651496 660 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [d15093524dcf0f71add09a89666b6ef551f8abcfe19462f1f52e6396cfa9b90f] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:34931 - 60518 "HINFO IN 7244376839273605299.5052886007572092194. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.04020687s
==> describe nodes <==
Name: old-k8s-version-709593
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-709593
kubernetes.io/os=linux
minikube.k8s.io/commit=37270640e5bc1cd4189f05b508feb80c8debef53
minikube.k8s.io/name=old-k8s-version-709593
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T09_57_07_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 09:57:00 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-709593
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 09:57:47 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:56:58 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Sun, 23 Nov 2025 09:57:36 +0000   Sun, 23 Nov 2025 09:57:33 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-709593
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863360Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863360Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: 9e6f0832-18db-4c8d-86e4-20812ea439e5
Boot ID: e4c4d39b-bebd-4037-9237-26b945dbe084
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                              ------------   ----------   ---------------   -------------   ---
default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          14s
kube-system   coredns-5dd5756b68-gf5sx                          100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      33s
kube-system   etcd-old-k8s-version-709593                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          45s
kube-system   kindnet-tpvt2                                     100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       33s
kube-system   kube-apiserver-old-k8s-version-709593             250m (3%)      0 (0%)       0 (0%)            0 (0%)          48s
kube-system   kube-controller-manager-old-k8s-version-709593    200m (2%)      0 (0%)       0 (0%)            0 (0%)          47s
kube-system   kube-proxy-sgv48                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          33s
kube-system   kube-scheduler-old-k8s-version-709593             100m (1%)      0 (0%)       0 (0%)            0 (0%)          48s
kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          32s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 850m (10%)   100m (1%)
memory              220Mi (0%)   220Mi (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
Events:
Type     Reason                    Age                 From              Message
----     ------                    ---                 ----              -------
Normal   Starting                  30s                 kube-proxy
Normal   Starting                  54s                 kubelet           Starting kubelet.
Normal   NodeHasSufficientMemory   54s (x8 over 54s)   kubelet           Node old-k8s-version-709593 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure     54s (x8 over 54s)   kubelet           Node old-k8s-version-709593 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      54s (x7 over 54s)   kubelet           Node old-k8s-version-709593 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced   54s                 kubelet           Updated Node Allocatable limit across pods
Normal   Starting                  45s                 kubelet           Starting kubelet.
Normal   NodeAllocatableEnforced   45s                 kubelet           Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory   45s                 kubelet           Node old-k8s-version-709593 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure     45s                 kubelet           Node old-k8s-version-709593 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      45s                 kubelet           Node old-k8s-version-709593 status is now: NodeHasSufficientPID
Normal   RegisteredNode            33s                 node-controller   Node old-k8s-version-709593 event: Registered Node old-k8s-version-709593 in Controller
Normal   NodeReady                 18s                 kubelet           Node old-k8s-version-709593 status is now: NodeReady
==> dmesg <==
[ +6.288463] kauditd_printk_skb: 47 callbacks suppressed
[Nov23 09:55] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff ba 2b 39 eb 11 2b 08 06
[Nov23 09:56] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 8e bd c3 0c c1 99 08 06
[ +10.195562] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 5e 49 b3 20 41 43 08 06
[ +5.912917] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff f2 c0 1c 98 33 a9 08 06
[ +0.000437] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 8e bd c3 0c c1 99 08 06
[ +10.002091] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 1e 47 bd bf 96 57 08 06
[ +0.000405] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff ba 2b 39 eb 11 2b 08 06
[ +4.460318] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 3e 85 b9 91 f8 a4 08 06
[ +0.000372] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 5e 49 b3 20 41 43 08 06
[ +2.904694] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 9e 48 a2 4c da c6 08 06
[Nov23 09:57] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 76 48 bf 8b d1 fc 08 06
[ +0.000931] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 9e 48 a2 4c da c6 08 06
==> etcd [d4c298d1c8060139c5bb973acee87dc3fbc6b6454b9e3c8ebe9c6b86a2e5a7b8] <==
{"level":"info","ts":"2025-11-23T09:56:58.59753Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-23T09:56:58.597864Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.597974Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.598004Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T09:56:58.599014Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"warn","ts":"2025-11-23T09:57:01.971736Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"124.487229ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424543 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:monitoring\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:monitoring\" value_size:573 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:01.971868Z","caller":"traceutil/trace.go:171","msg":"trace[1367842110] transaction","detail":"{read_only:false; response_revision:112; number_of_response:1; }","duration":"185.333295ms","start":"2025-11-23T09:57:01.786515Z","end":"2025-11-23T09:57:01.971849Z","steps":["trace[1367842110] 'process raft request' (duration: 59.969834ms)","trace[1367842110] 'compare' (duration: 124.335128ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.204167Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"132.409698ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424553 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/view\" value_size:673 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.204261Z","caller":"traceutil/trace.go:171","msg":"trace[1142240257] transaction","detail":"{read_only:false; response_revision:117; number_of_response:1; }","duration":"141.084345ms","start":"2025-11-23T09:57:02.063163Z","end":"2025-11-23T09:57:02.204247Z","steps":["trace[1142240257] 'compare' (duration: 132.298203ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T09:57:02.49574Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"128.58211ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424557 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-edit\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-edit\" value_size:1957 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.495841Z","caller":"traceutil/trace.go:171","msg":"trace[1763507131] transaction","detail":"{read_only:false; response_revision:119; number_of_response:1; }","duration":"249.990542ms","start":"2025-11-23T09:57:02.245837Z","end":"2025-11-23T09:57:02.495828Z","steps":["trace[1763507131] 'process raft request' (duration: 121.258106ms)","trace[1763507131] 'compare' (duration: 128.446744ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.811736Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"142.743867ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424559 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-view\" value_size:1862 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:02.811827Z","caller":"traceutil/trace.go:171","msg":"trace[334752838] linearizableReadLoop","detail":"{readStateIndex:125; appliedIndex:124; }","duration":"197.624876ms","start":"2025-11-23T09:57:02.614187Z","end":"2025-11-23T09:57:02.811812Z","steps":["trace[334752838] 'read index received' (duration: 54.776357ms)","trace[334752838] 'applied index is now lower than readState.Index' (duration: 142.846972ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:02.811874Z","caller":"traceutil/trace.go:171","msg":"trace[577911190] transaction","detail":"{read_only:false; response_revision:120; number_of_response:1; }","duration":"309.546043ms","start":"2025-11-23T09:57:02.502295Z","end":"2025-11-23T09:57:02.811841Z","steps":["trace[577911190] 'process raft request' (duration: 166.630437ms)","trace[577911190] 'compare' (duration: 142.557878ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:02.811926Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"197.752655ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-23T09:57:02.811961Z","caller":"traceutil/trace.go:171","msg":"trace[450821894] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:120; }","duration":"197.79258ms","start":"2025-11-23T09:57:02.614154Z","end":"2025-11-23T09:57:02.811947Z","steps":["trace[450821894] 'agreement among raft nodes before linearized reading' (duration: 197.694344ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T09:57:02.812003Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-23T09:57:02.50227Z","time spent":"309.683301ms","remote":"127.0.0.1:39468","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":1917,"response count":0,"response size":37,"request content":"compare:<target:MOD key:\"/registry/clusterroles/system:aggregate-to-view\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:aggregate-to-view\" value_size:1862 >> failure:<>"}
{"level":"warn","ts":"2025-11-23T09:57:03.126521Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"185.304764ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424563 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:heapster\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:heapster\" value_size:579 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:03.126599Z","caller":"traceutil/trace.go:171","msg":"trace[1403684060] transaction","detail":"{read_only:false; response_revision:121; number_of_response:1; }","duration":"309.884743ms","start":"2025-11-23T09:57:02.816704Z","end":"2025-11-23T09:57:03.126589Z","steps":["trace[1403684060] 'process raft request' (duration: 124.45761ms)","trace[1403684060] 'compare' (duration: 185.120538ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T09:57:03.126635Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-23T09:57:02.816683Z","time spent":"309.941015ms","remote":"127.0.0.1:39468","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":625,"response count":0,"response size":37,"request content":"compare:<target:MOD key:\"/registry/clusterroles/system:heapster\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:heapster\" value_size:579 >> failure:<>"}
{"level":"warn","ts":"2025-11-23T09:57:03.378154Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"117.573425ms","expected-duration":"100ms","prefix":"","request":"header:<ID:15638356837419424567 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/clusterroles/system:node-problem-detector\" mod_revision:0 > success:<request_put:<key:\"/registry/clusterroles/system:node-problem-detector\" value_size:583 >> failure:<>>","response":"size:14"}
{"level":"info","ts":"2025-11-23T09:57:03.37825Z","caller":"traceutil/trace.go:171","msg":"trace[407529311] transaction","detail":"{read_only:false; response_revision:123; number_of_response:1; }","duration":"236.959494ms","start":"2025-11-23T09:57:03.141275Z","end":"2025-11-23T09:57:03.378235Z","steps":["trace[407529311] 'process raft request' (duration: 119.236514ms)","trace[407529311] 'compare' (duration: 117.440472ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:03.488901Z","caller":"traceutil/trace.go:171","msg":"trace[331049729] transaction","detail":"{read_only:false; response_revision:124; number_of_response:1; }","duration":"105.829119ms","start":"2025-11-23T09:57:03.38305Z","end":"2025-11-23T09:57:03.488879Z","steps":["trace[331049729] 'process raft request' (duration: 105.359949ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T09:57:03.685992Z","caller":"traceutil/trace.go:171","msg":"trace[1238052414] transaction","detail":"{read_only:false; response_revision:127; number_of_response:1; }","duration":"180.587913ms","start":"2025-11-23T09:57:03.505382Z","end":"2025-11-23T09:57:03.68597Z","steps":["trace[1238052414] 'process raft request' (duration: 128.699733ms)","trace[1238052414] 'compare' (duration: 51.773911ms)"],"step_count":2}
{"level":"info","ts":"2025-11-23T09:57:44.684831Z","caller":"traceutil/trace.go:171","msg":"trace[671402052] transaction","detail":"{read_only:false; response_revision:477; number_of_response:1; }","duration":"110.153636ms","start":"2025-11-23T09:57:44.574655Z","end":"2025-11-23T09:57:44.684809Z","steps":["trace[671402052] 'process raft request' (duration: 110.003906ms)"],"step_count":1}
==> kernel <==
09:57:51 up 40 min, 0 user, load average: 5.55, 4.20, 2.64
Linux old-k8s-version-709593 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [a1af83bb6749287f8df2adaeff4c43c5820f5194cb24f7fe3eb5ef134893d93c] <==
I1123 09:57:23.601786 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 09:57:23.602109 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1123 09:57:23.602284 1 main.go:148] setting mtu 1500 for CNI
I1123 09:57:23.602304 1 main.go:178] kindnetd IP family: "ipv4"
I1123 09:57:23.602318 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T09:57:23Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 09:57:23.855098 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 09:57:23.855140 1 controller.go:381] "Waiting for informer caches to sync"
I1123 09:57:23.855154 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 09:57:23.900801 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 09:57:24.355697 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 09:57:24.355735 1 metrics.go:72] Registering metrics
I1123 09:57:24.355844 1 controller.go:711] "Syncing nftables rules"
I1123 09:57:33.855972 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 09:57:33.856030 1 main.go:301] handling current node
I1123 09:57:43.856054 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 09:57:43.856111 1 main.go:301] handling current node
==> kube-apiserver [9ab267968c030e0a3bce6b123e59cf0e26705c3742842d1fe84461463f48a663] <==
I1123 09:57:00.606586 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1123 09:57:00.606625 1 aggregator.go:166] initial CRD sync complete...
I1123 09:57:00.606634 1 autoregister_controller.go:141] Starting autoregister controller
I1123 09:57:00.606641 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 09:57:00.606650 1 cache.go:39] Caches are synced for autoregister controller
I1123 09:57:00.608306 1 controller.go:624] quota admission added evaluator for: namespaces
I1123 09:57:00.609050 1 shared_informer.go:318] Caches are synced for configmaps
I1123 09:57:00.624076 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 09:57:00.649174 1 shared_informer.go:318] Caches are synced for node_authorizer
I1123 09:57:01.610779 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 09:57:01.702685 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 09:57:01.702703 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 09:57:04.338662 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 09:57:04.416324 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 09:57:04.524354 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 09:57:04.538023 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1123 09:57:04.540122 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 09:57:04.546988 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 09:57:04.575545 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 09:57:05.959109 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 09:57:05.975157 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 09:57:05.986661 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 09:57:17.926455 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1123 09:57:18.460236 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
E1123 09:57:47.744877 1 upgradeaware.go:425] Error proxying data from client to backend: write tcp 192.168.76.2:47470->192.168.76.2:10250: write: connection reset by peer
==> kube-controller-manager [1b2964c41626762d3beb765fa131cc83c8eafa60068157afab3d1e775a761750] <==
I1123 09:57:18.051120 1 shared_informer.go:318] Caches are synced for resource quota
I1123 09:57:18.052924 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-709593" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1123 09:57:18.132109 1 shared_informer.go:318] Caches are synced for attach detach
I1123 09:57:18.349828 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-tndwj"
I1123 09:57:18.372449 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-gf5sx"
I1123 09:57:18.406026 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="452.070013ms"
I1123 09:57:18.463224 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="57.127396ms"
I1123 09:57:18.483794 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-sgv48"
I1123 09:57:18.483871 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 09:57:18.504473 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tpvt2"
I1123 09:57:18.560131 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="95.571025ms"
I1123 09:57:18.560538 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="230.617µs"
I1123 09:57:18.562358 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 09:57:18.562385 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 09:57:19.789485 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 09:57:19.808843 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-tndwj"
I1123 09:57:19.823673 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="40.107806ms"
I1123 09:57:19.833064 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.315043ms"
I1123 09:57:19.833185 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="76.73µs"
I1123 09:57:33.949212 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="110.096µs"
I1123 09:57:33.981566 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="90.706µs"
I1123 09:57:35.176726 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="136.892µs"
I1123 09:57:35.214616 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.894482ms"
I1123 09:57:35.214767 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="93.972µs"
I1123 09:57:38.010283 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [e82a6fec044de994c043f2f9c5656e0c2a71e8e480ed8f7cca948de66ed51059] <==
I1123 09:57:20.277594 1 server_others.go:69] "Using iptables proxy"
I1123 09:57:20.292272 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1123 09:57:20.339595 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 09:57:20.344426 1 server_others.go:152] "Using iptables Proxier"
I1123 09:57:20.344681 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 09:57:20.344815 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 09:57:20.344909 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 09:57:20.345726 1 server.go:846] "Version info" version="v1.28.0"
I1123 09:57:20.345900 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 09:57:20.347106 1 config.go:188] "Starting service config controller"
I1123 09:57:20.350153 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 09:57:20.349625 1 config.go:97] "Starting endpoint slice config controller"
I1123 09:57:20.350452 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 09:57:20.350106 1 config.go:315] "Starting node config controller"
I1123 09:57:20.350583 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 09:57:20.450547 1 shared_informer.go:318] Caches are synced for service config
I1123 09:57:20.450714 1 shared_informer.go:318] Caches are synced for node config
I1123 09:57:20.450744 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [33f6ed017ec882589a089aad6a009c657f1fc80298864259b48138233e264c91] <==
W1123 09:57:01.700971 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 09:57:01.701017 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 09:57:01.704770 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:01.704814 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:01.752559 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:01.752596 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:01.981985 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 09:57:01.982024 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 09:57:01.983872 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 09:57:01.983905 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 09:57:02.057453 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:02.057498 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:02.144948 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1123 09:57:02.145025 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1123 09:57:03.483078 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1123 09:57:03.483126 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1123 09:57:03.561961 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 09:57:03.562012 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 09:57:03.808694 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 09:57:03.808744 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 09:57:03.860531 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 09:57:03.860576 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 09:57:03.972432 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1123 09:57:03.972478 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
I1123 09:57:04.567087 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: W1123 09:57:18.547160 1519 reflector.go:535] object-"kube-system"/"kube-proxy": failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-709593" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-709593' and this object
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: E1123 09:57:18.547223 1519 reflector.go:147] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-709593" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-709593' and this object
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709145 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz9pq\" (UniqueName: \"kubernetes.io/projected/f5d963bd-a2f2-44d2-969c-d219c55aba33-kube-api-access-dz9pq\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709218 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-cni-cfg\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709250 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-xtables-lock\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709281 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6p4v\" (UniqueName: \"kubernetes.io/projected/fd3daece-c28b-4efa-ae53-16c16790e5be-kube-api-access-c6p4v\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709316 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f5d963bd-a2f2-44d2-969c-d219c55aba33-lib-modules\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709389 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fd3daece-c28b-4efa-ae53-16c16790e5be-lib-modules\") pod \"kindnet-tpvt2\" (UID: \"fd3daece-c28b-4efa-ae53-16c16790e5be\") " pod="kube-system/kindnet-tpvt2"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709422 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f5d963bd-a2f2-44d2-969c-d219c55aba33-kube-proxy\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:18 old-k8s-version-709593 kubelet[1519]: I1123 09:57:18.709454 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f5d963bd-a2f2-44d2-969c-d219c55aba33-xtables-lock\") pod \"kube-proxy-sgv48\" (UID: \"f5d963bd-a2f2-44d2-969c-d219c55aba33\") " pod="kube-system/kube-proxy-sgv48"
Nov 23 09:57:24 old-k8s-version-709593 kubelet[1519]: I1123 09:57:24.152873 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-sgv48" podStartSLOduration=6.152803535 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:21.2206232 +0000 UTC m=+15.292351138" watchObservedRunningTime="2025-11-23 09:57:24.152803535 +0000 UTC m=+18.224531435"
Nov 23 09:57:24 old-k8s-version-709593 kubelet[1519]: I1123 09:57:24.153064 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tpvt2" podStartSLOduration=2.534840269 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="2025-11-23 09:57:19.547788823 +0000 UTC m=+13.619516716" lastFinishedPulling="2025-11-23 09:57:23.165974087 +0000 UTC m=+17.237701980" observedRunningTime="2025-11-23 09:57:24.152485675 +0000 UTC m=+18.224213576" watchObservedRunningTime="2025-11-23 09:57:24.153025533 +0000 UTC m=+18.224753438"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.920548 1519 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.948876 1519 topology_manager.go:215] "Topology Admit Handler" podUID="9a493920-3739-4eb9-8426-3590a8f2ee51" podNamespace="kube-system" podName="coredns-5dd5756b68-gf5sx"
Nov 23 09:57:33 old-k8s-version-709593 kubelet[1519]: I1123 09:57:33.949059 1519 topology_manager.go:215] "Topology Admit Handler" podUID="ba58926e-fdf3-4750-b44d-7c94a027737e" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123178 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-724lb\" (UniqueName: \"kubernetes.io/projected/ba58926e-fdf3-4750-b44d-7c94a027737e-kube-api-access-724lb\") pod \"storage-provisioner\" (UID: \"ba58926e-fdf3-4750-b44d-7c94a027737e\") " pod="kube-system/storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123243 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/ba58926e-fdf3-4750-b44d-7c94a027737e-tmp\") pod \"storage-provisioner\" (UID: \"ba58926e-fdf3-4750-b44d-7c94a027737e\") " pod="kube-system/storage-provisioner"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123297 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rzx7\" (UniqueName: \"kubernetes.io/projected/9a493920-3739-4eb9-8426-3590a8f2ee51-kube-api-access-5rzx7\") pod \"coredns-5dd5756b68-gf5sx\" (UID: \"9a493920-3739-4eb9-8426-3590a8f2ee51\") " pod="kube-system/coredns-5dd5756b68-gf5sx"
Nov 23 09:57:34 old-k8s-version-709593 kubelet[1519]: I1123 09:57:34.123357 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a493920-3739-4eb9-8426-3590a8f2ee51-config-volume\") pod \"coredns-5dd5756b68-gf5sx\" (UID: \"9a493920-3739-4eb9-8426-3590a8f2ee51\") " pod="kube-system/coredns-5dd5756b68-gf5sx"
Nov 23 09:57:35 old-k8s-version-709593 kubelet[1519]: I1123 09:57:35.176230 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-gf5sx" podStartSLOduration=17.176168603 podCreationTimestamp="2025-11-23 09:57:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:35.175754843 +0000 UTC m=+29.247482743" watchObservedRunningTime="2025-11-23 09:57:35.176168603 +0000 UTC m=+29.247896503"
Nov 23 09:57:35 old-k8s-version-709593 kubelet[1519]: I1123 09:57:35.204836 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.204788689 podCreationTimestamp="2025-11-23 09:57:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 09:57:35.19026469 +0000 UTC m=+29.261992589" watchObservedRunningTime="2025-11-23 09:57:35.204788689 +0000 UTC m=+29.276516592"
Nov 23 09:57:37 old-k8s-version-709593 kubelet[1519]: I1123 09:57:37.507262 1519 topology_manager.go:215] "Topology Admit Handler" podUID="bea346d9-0dca-482c-b9f9-7b71741b18d7" podNamespace="default" podName="busybox"
Nov 23 09:57:37 old-k8s-version-709593 kubelet[1519]: I1123 09:57:37.646410 1519 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj5kg\" (UniqueName: \"kubernetes.io/projected/bea346d9-0dca-482c-b9f9-7b71741b18d7-kube-api-access-pj5kg\") pod \"busybox\" (UID: \"bea346d9-0dca-482c-b9f9-7b71741b18d7\") " pod="default/busybox"
Nov 23 09:57:41 old-k8s-version-709593 kubelet[1519]: I1123 09:57:41.192410 1519 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.9155870259999999 podCreationTimestamp="2025-11-23 09:57:37 +0000 UTC" firstStartedPulling="2025-11-23 09:57:37.952685082 +0000 UTC m=+32.024412966" lastFinishedPulling="2025-11-23 09:57:40.229447793 +0000 UTC m=+34.301175679" observedRunningTime="2025-11-23 09:57:41.192028507 +0000 UTC m=+35.263756408" watchObservedRunningTime="2025-11-23 09:57:41.192349739 +0000 UTC m=+35.264077634"
Nov 23 09:57:47 old-k8s-version-709593 kubelet[1519]: E1123 09:57:47.744109 1519 upgradeaware.go:425] Error proxying data from client to backend: readfrom tcp 192.168.76.2:34062->192.168.76.2:10010: write tcp 192.168.76.2:34062->192.168.76.2:10010: write: broken pipe
==> storage-provisioner [6188a0a11a558ccfe4a936446819a158ec0f3ff08b1c7692bf3db57ce82539bc] <==
I1123 09:57:34.497639 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 09:57:34.510426 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 09:57:34.510517 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 09:57:34.519430 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 09:57:34.519625 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f!
I1123 09:57:34.522696 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"89d02a34-1ced-4051-82ca-0198f46f6d6a", APIVersion:"v1", ResourceVersion:"448", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f became leader
I1123 09:57:34.619835 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-709593_09fc0e4b-1f89-47c2-90c6-e8921583fe8f!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-709593 -n old-k8s-version-709593
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-709593 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (14.80s)