Test environment: node memory exhaustion caused the node's kubelet to shut down, so new pods could not be created

Published 2024-01-03 09:25:53, author: YYQ-
#Newly created pods are stuck in Pending

[root@redis ~]# kubectl get pod -n cms-v2
NAME READY STATUS RESTARTS AGE
base-auth-deploy-58c87b544-m2sc8 1/1 Running 0 65d
cms-admin-deploy-7d9d49d8c6-g8g8x 1/1 Running 0 23h
cms-agent-nginx-server-deploy-6df546d589-9mldp 0/1 Pending 0 45m
cms-agent-nginx-server-deploy-6df546d589-cdmzn 0/1 Pending 0 45m
cms-agent-nginx-server-deploy-6df546d589-w8z2r 1/1 Running 0 65d
cms-collector-c885c6b7c-z9kkg 1/1 Running 5 65d
cms-flinksql-client-deploy-6d6fbb7f56-tjzk5 1/1 Running 0 2d22h
cms-gateway-deploy-bfd794697-5rbt4 1/1 Running 0 2d22h
cms-hawkeye-heartbeat-77f4c9d4c-ttsss 2/2 Terminating 0 9d
cms-hawkeye-heartbeat-77f4c9d4c-x6887 0/2 Pending 0 45m
cms-hawkeye-inspection-5d77bfb4d4-l8wlp 0/1 Pending 0 45m
cms-hawkeye-inspection-5d77bfb4d4-vbhgf 1/1 Terminating 0 9d
cms-hawkeye-mediator-68ff4c4c5d-d6qvc 1/1 Terminating 0 9d
cms-hawkeye-mediator-68ff4c4c5d-qj82r 0/1 Pending 0 45m
cms-hawkeye-nodata-74888955b4-rvr8r 2/2 Terminating 0 9d
cms-hawkeye-nodata-74888955b4-tls4g 0/2 Pending 0 45m
cms-hawkeye-pmon-779bb97b5d-9bjzc 2/2 Running 0 3d17h
cms-hawkeye-pmon-gateway-794df59d46-7hnf2 0/2 Pending 0 45m
cms-hawkeye-pmon-gateway-794df59d46-ksw4c 2/2 Terminating 0 3d17h
cms-hawkeye-schedule-567f97b699-p5b66 1/1 Running 0 9d
cms-hawkeye-transit-6df6f786d4-jhkz8 1/1 Running 0 8d
cms-influxdb-search-deploy-b989648fd-fhjlh 1/1 Running 0 26h
cms-integrate-deploy-5cf74d5956-74m7b 1/1 Running 0 3d20h
cms-mgr-deploy-748885b6c4-cmwjn 0/1 Pending 0 45m
cms-mgr-deploy-748885b6c4-djflt 1/1 Terminating 0 2d22h
cms-msg-deploy-696795cc67-852bf 1/1 Terminating 0 20h
cms-msg-deploy-696795cc67-bw7l6 1/1 Running 0 20h
cms-msg-deploy-696795cc67-hh9cs 0/1 Pending 0 45m
cms-opacter-deploy-7f7bc576bf-8cwck 1/1 Running 0 2d22h
cms-openapi-deploy-787d994f74-kjmrw 1/1 Running 0 2d22h
cms-schedule-deploy-fbff4b776-6n6xg 1/1 Running 0 2d22h
cms-schema-deploy-597978d5f8-ldzcl 0/1 Pending 0 45m
cms-search-deploy-bc55c7897-fxnjk 1/1 Terminating 0 3d20h
cms-search-deploy-bc55c7897-hnwlf 0/1 Pending 0 45m
cms-search-deploy-bc55c7897-xcpn6 1/1 Running 0 3d20h
cms-v2-front-deploy-7d98844f7b-7cqwn 1/1 Terminating 0 43h
cms-v2-front-deploy-7d98844f7b-blnhc 0/1 Pending 0 45m
cms-v2-metric-calculation-5b498f89f-48n2d 1/1 Terminating 8 2d22h
cms-v2-metric-calculation-5b498f89f-74dt8 0/1 Pending 0 45m
cms-v2-mobile-front-deploy-85ccffc6b5-drtsn 0/1 Pending 0 45m
cms-v2-mobile-front-deploy-85ccffc6b5-kwtrl 1/1 Terminating 1 44d
cms-v2-rule-detection-6bc778c9bf-fw6h2 1/1 Terminating 9 2d22h
cms-v2-rule-detection-6bc778c9bf-mf57d 0/1 Pending 0 45m
cms-v2-rule-detection-taskmanager-33-1 1/1 Terminating 0 7h14m
cms-v2-rule-detection-taskmanager-34-1 1/1 Terminating 0 4h46m
hana-client 1/1 Running 0 121d
logstash-7897759d75-k2xrp 1/1 Running 0 65d
lp-agent-5dbbf59f65-z4kms 1/1 Running 0 2d19h
notice-service-deploy-657957cbbd-dqvbc 1/1 Running 0 65d
yx-monitor-api-deploy-595fcf4b95-r2zjz 1/1 Running 2 20h
yx-monitor-bingo-deploy-97ddf79dc-5rtph 1/1 Running 0 51d
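
With this many pods it is easier to list only the stuck ones; status.phase is a supported field selector for pods, so something like the following (not run here) narrows the output:

kubectl get pod -n cms-v2 --field-selector=status.phase=Pending
# Terminating pods usually still report phase=Running; they are flagged by a deletionTimestamp instead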



#Check the error events on a Pending pod
[root@redis ~]# kubectl describe pod -n cms-v2 cms-v2-rule-detection-6bc778c9bf-mf57d
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
[root@redis ~]# kubectl describe pod -n cms-v2 cms-v2-front-deploy-7d98844f7b-blnhc
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Warning FailedScheduling <unknown> default-scheduler 0/5 nodes are available: 2 node(s) were unschedulable, 3 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate.
Normal Scheduled <unknown> default-scheduler Successfully assigned cms-v2/cms-v2-front-deploy-7d98844f7b-blnhc to 10.1.192.89
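
Every Pending pod reports the same FailedScheduling message: 2 cordoned masters plus 3 unreachable workers leaves 0/5 schedulable nodes. Rather than describing pods one by one, the same messages can be pulled from the event stream (reason is a supported event field selector); a sketch:

kubectl get events -n cms-v2 --field-selector reason=FailedScheduling --sort-by=.lastTimestamp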

 

#Check the nodes
[root@redis ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
10.1.192.74 Ready,SchedulingDisabled master 2y334d v1.18.2
10.1.192.81 Ready,SchedulingDisabled master 2y334d v1.18.2
10.1.192.83 NotReady node 2y334d v1.18.2
10.1.192.88 NotReady node 2y334d v1.18.2
10.1.192.89 NotReady node 2y334d v1.18.2
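
Both masters are Ready but cordoned (SchedulingDisabled), and all three workers are NotReady, so there is genuinely nowhere left to schedule. The reason behind NotReady can be read without a full describe, for example with kubectl's jsonpath output (a sketch, not run in this session):

kubectl get node 10.1.192.83 -o jsonpath='{range .status.conditions[*]}{.type}={.status} ({.reason}){"\n"}{end}'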

 

#Check the taints
[root@redis ~]# kubectl describe node 10.1.192.89 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule
[root@redis ~]# kubectl describe node 10.1.192.88 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule
[root@redis ~]# kubectl describe node 10.1.192.83 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule

 

#Remove the taints
[root@redis ~]# kubectl taint node 10.1.192.89 node.kubernetes.io/unreachable:NoSchedule-
node/10.1.192.89 untainted
[root@redis ~]# kubectl taint node 10.1.192.88 node.kubernetes.io/unreachable:NoSchedule-
node/10.1.192.88 untainted
[root@redis ~]# kubectl taint node 10.1.192.83 node.kubernetes.io/unreachable:NoSchedule-
node/10.1.192.83 untainted
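
Since every worker carries the kubernetes.io/role=node label (visible in the node descriptions further down), the same removal could also be done in one command with a label selector; a sketch:

kubectl taint node -l kubernetes.io/role=node node.kubernetes.io/unreachable:NoSchedule-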

 

#Check again — the taints are already back
[root@redis ~]# kubectl describe node 10.1.192.83 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule
[root@redis ~]# kubectl describe node 10.1.192.88 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule
[root@redis ~]# kubectl describe node 10.1.192.89 |grep Taint
Taints: node.kubernetes.io/unreachable:NoSchedule
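
The taints come straight back: while a node's Ready condition is Unknown, the node lifecycle controller keeps re-applying node.kubernetes.io/unreachable, so deleting it manually is only ever temporary. Note also that grepping for "Taint" shows just the first line; the full taint list (normally a NoExecute twin as well, which is what drives the TaintManagerEviction entries in the events below) can be read with:

kubectl get node 10.1.192.89 -o jsonpath='{.spec.taints}'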

 

#Check the events
[root@redis ~]# kubectl get event -n cms-v2
175m Warning FailedToUpdateEndpointSlices service/lp-svc Error updating Endpoint Slices for Service cms-v2/lp-svc: node "10.1.192.83" not found
115m Warning FailedToUpdateEndpointSlices service/lp-svc Error updating Endpoint Slices for Service cms-v2/lp-svc: node "10.1.192.83" not found
114m Warning FailedToUpdateEndpointSlices service/lp-svc Error updating Endpoint Slices for Service cms-v2/lp-svc: node "10.1.192.83" not found
111m Warning FailedToUpdateEndpointSlices service/lp-svc Error updating Endpoint Slices for Service cms-v2/lp-svc: node "10.1.192.83" not found
19m Warning FailedToUpdateEndpointSlices service/lp-svc Error updating Endpoint Slices for Service cms-v2/lp-svc: node "10.1.192.83" not found
162m Normal TaintManagerEviction pod/notice-service-deploy-657957cbbd-dqvbc Cancelling deletion of Pod cms-v2/notice-service-deploy-657957cbbd-dqvbc
128m Normal TaintManagerEviction pod/notice-service-deploy-657957cbbd-dqvbc Cancelling deletion of Pod cms-v2/notice-service-deploy-657957cbbd-dqvbc
73m Normal TaintManagerEviction pod/notice-service-deploy-657957cbbd-dqvbc Cancelling deletion of Pod cms-v2/notice-service-deploy-657957cbbd-dqvbc
18h Warning FailedToUpdateEndpointSlices service/notice-service Error updating Endpoint Slices for Service cms-v2/notice-service: node "10.1.192.83" not found
17h Warning FailedToUpdateEndpointSlices service/notice-service Error updating Endpoint Slices for Service cms-v2/notice-service: node "10.1.192.83" not found
111m Warning FailedToUpdateEndpointSlices service/notice-service Error updating Endpoint Slices for Service cms-v2/notice-service: node "10.1.192.83" not found
68m Warning FailedToUpdateEndpointSlices service/notice-service Error updating Endpoint Slices for Service cms-v2/notice-service: node "10.1.192.83" not found
19m Warning FailedToUpdateEndpointSlices service/notice-service Error updating Endpoint Slices for Service cms-v2/notice-service: node "10.1.192.83" not found
<unknown> Normal Scheduled pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Successfully assigned cms-v2/yx-monitor-api-deploy-595fcf4b95-r2zjz to 10.1.192.83
20h Normal Pulling pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Pulling image "10.1.192.94/library/yx-monitor-api:v2.0.30"
20h Warning Failed pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Failed to pull image "10.1.192.94/library/yx-monitor-api:v2.0.30": rpc error: code = Unknown desc = Error response from daemon: unknown: artifact library/yx-monitor-api:v2.0.30 not found
20h Warning Failed pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Error: ErrImagePull
20h Normal BackOff pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Back-off pulling image "10.1.192.94/library/yx-monitor-api:v2.0.30"
20h Warning Failed pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Error: ImagePullBackOff
20h Normal Pulled pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Successfully pulled image "10.1.192.94/library/yx-monitor-api:v2.0.30"
20h Normal Created pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Created container yx-monitor-api-container
20h Normal Started pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Started container yx-monitor-api-container
18h Normal Pulling pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Pulling image "10.1.192.94/library/openjdk:8u232-stretch-yak"
18h Normal Pulled pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Successfully pulled image "10.1.192.94/library/openjdk:8u232-stretch-yak"
20h Normal Created pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Created container sidecar-jdk
20h Normal Started pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Started container sidecar-jdk
4h27m Warning Unhealthy pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Liveness probe failed: Get http://172.20.2.240:8090/yx-monitor-api/actuator/health: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
4h15m Warning Unhealthy pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Readiness probe failed: Get http://172.20.2.240:8090/yx-monitor-api/actuator/health: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
18h Normal Killing pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Container sidecar-jdk failed liveness probe, will be restarted
18h Warning Unhealthy pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Readiness probe failed: Get http://172.20.2.240:8090/yx-monitor-api/actuator/health: dial tcp 172.20.2.240:8090: connect: connection refused
162m Normal TaintManagerEviction pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Cancelling deletion of Pod cms-v2/yx-monitor-api-deploy-595fcf4b95-r2zjz
128m Normal TaintManagerEviction pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Cancelling deletion of Pod cms-v2/yx-monitor-api-deploy-595fcf4b95-r2zjz
73m Normal TaintManagerEviction pod/yx-monitor-api-deploy-595fcf4b95-r2zjz Cancelling deletion of Pod cms-v2/yx-monitor-api-deploy-595fcf4b95-r2zjz
20h Normal SuccessfulCreate replicaset/yx-monitor-api-deploy-595fcf4b95 Created pod: yx-monitor-api-deploy-595fcf4b95-r2zjz
20h Normal Killing pod/yx-monitor-api-deploy-5f4bb45b97-c2d8t Stopping container sidecar-jdk
20h Warning Unhealthy pod/yx-monitor-api-deploy-5f4bb45b97-c2d8t Liveness probe failed: Get http://172.20.4.92:8090/yx-monitor-api/actuator/health: dial tcp 172.20.4.92:8090: connect: connection refused
20h Normal SuccessfulDelete replicaset/yx-monitor-api-deploy-5f4bb45b97 Deleted pod: yx-monitor-api-deploy-5f4bb45b97-c2d8t
20h Normal ScalingReplicaSet deployment/yx-monitor-api-deploy Scaled up replica set yx-monitor-api-deploy-595fcf4b95 to 1
20h Normal ScalingReplicaSet deployment/yx-monitor-api-deploy Scaled down replica set yx-monitor-api-deploy-5f4bb45b97 to 0
18h Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
175m Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
115m Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
114m Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
111m Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
19m Warning FailedToUpdateEndpointSlices service/yx-monitor-api-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-api-service: node "10.1.192.83" not found
162m Normal TaintManagerEviction pod/yx-monitor-bingo-deploy-97ddf79dc-5rtph Cancelling deletion of Pod cms-v2/yx-monitor-bingo-deploy-97ddf79dc-5rtph
128m Normal TaintManagerEviction pod/yx-monitor-bingo-deploy-97ddf79dc-5rtph Cancelling deletion of Pod cms-v2/yx-monitor-bingo-deploy-97ddf79dc-5rtph
73m Normal TaintManagerEviction pod/yx-monitor-bingo-deploy-97ddf79dc-5rtph Cancelling deletion of Pod cms-v2/yx-monitor-bingo-deploy-97ddf79dc-5rtph
19h Warning FailedToUpdateEndpointSlices service/yx-monitor-bingo-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-bingo-service: node "10.1.192.83" not found
114m Warning FailedToUpdateEndpointSlices service/yx-monitor-bingo-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-bingo-service: node "10.1.192.83" not found
111m Warning FailedToUpdateEndpointSlices service/yx-monitor-bingo-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-bingo-service: node "10.1.192.83" not found
19m Warning FailedToUpdateEndpointSlices service/yx-monitor-bingo-service Error updating Endpoint Slices for Service cms-v2/yx-monitor-bingo-service: node "10.1.192.83" not found
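
The event stream is noisy; filtering to warnings and sorting by time makes the recurring failures (endpoint-slice updates against the missing node, repeated probe failures) easier to spot. A sketch:

kubectl get event -n cms-v2 --field-selector type=Warning --sort-by=.lastTimestamp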

 


#Further checking shows that the kubelet on the worker nodes has died
[root@redis ~]# kubectl describe node 10.1.192.88
Name: 10.1.192.88
Roles: node
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=10.1.192.88
kubernetes.io/os=linux
kubernetes.io/role=node
Annotations: flannel.alpha.coreos.com/backend-data: {"VtepMAC":"86:eb:27:e6:92:34"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.1.192.88
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 29 Jan 2021 14:59:05 +0800
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: 10.1.192.88
AcquireTime: <unset>
RenewTime: Fri, 29 Dec 2023 12:35:36 +0800
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Thu, 10 Nov 2022 20:09:03 +0800 Thu, 10 Nov 2022 20:09:03 +0800 FlannelIsUp Flannel is running on this node
MemoryPressure Unknown Fri, 29 Dec 2023 12:35:24 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
DiskPressure Unknown Fri, 29 Dec 2023 12:35:24 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
PIDPressure Unknown Fri, 29 Dec 2023 12:35:24 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Ready Unknown Fri, 29 Dec 2023 12:35:24 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Addresses:
InternalIP: 10.1.192.88
Hostname: 10.1.192.88
Capacity:
cpu: 16
ephemeral-storage: 102189Mi
hugepages-2Mi: 0
memory: 32888584Ki
pods: 110
Allocatable:
cpu: 16
ephemeral-storage: 96437639418
hugepages-2Mi: 0
memory: 32171784Ki
pods: 110
System Info:
Machine ID: 12accf0612accf0612accf0612accf06
System UUID: e694362d-e694-362d-e694-362de694362d
Boot ID: 0be5c0af-32a3-4084-8468-12a3a218d95d
Kernel Version: 5.4.93-1.el7.elrepo.x86_64
OS Image: CentOS Linux 7 (Core)
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://19.3.8
Kubelet Version: v1.18.2
Kube-Proxy Version: v1.18.2
PodCIDR: 172.20.3.0/24
PodCIDRs: 172.20.3.0/24
Non-terminated Pods: (20 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
apm zk-0 500m (3%) 0 (0%) 1Gi (3%) 0 (0%) 661d
cloudops ansible-runner-7bb7fb678c-92pnm 500m (3%) 1 (6%) 256Mi (0%) 1Gi (3%) 42m
cloudops cloudops-868fcb7987-ws7cb 1 (6%) 1 (6%) 3Gi (9%) 3Gi (9%) 107d
cloudops crontask-59856b9d57-vb7dl 500m (3%) 1 (6%) 256Mi (0%) 1Gi (3%) 107d
cms-v2 cms-collector-c885c6b7c-z9kkg 500m (3%) 500m (3%) 1Gi (3%) 1Gi (3%) 65d
cms-v2 cms-gateway-deploy-bfd794697-5rbt4 300m (1%) 1 (6%) 1Gi (3%) 2Gi (6%) 2d22h
cms-v2 cms-integrate-deploy-5cf74d5956-74m7b 300m (1%) 1 (6%) 1Gi (3%) 2Gi (6%) 3d20h
cms-v2 cms-msg-deploy-696795cc67-hh9cs 500m (3%) 2 (12%) 3Gi (9%) 4Gi (13%) 42m
cms-v2 cms-opacter-deploy-7f7bc576bf-8cwck 300m (1%) 2 (12%) 2Gi (6%) 3Gi (9%) 2d22h
cms-v2 cms-v2-rule-detection-6bc778c9bf-mf57d 1 (6%) 1 (6%) 2Gi (6%) 2Gi (6%) 42m
cms-v2 cms-v2-rule-detection-taskmanager-33-1 2 (12%) 2 (12%) 2Gi (6%) 2Gi (6%) 7h11m
cms-v2 logstash-7897759d75-k2xrp 1 (6%) 1 (6%) 2Gi (6%) 2Gi (6%) 65d
default redis-cluster-2 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
default redis-cluster-5 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
kube-system dashboard-metrics-scraper-545bbb8767-j4mj4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2y334d
kube-system kube-flannel-ds-amd64-glfpj 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 2y334d
kube-system node-local-dns-n8gmd 25m (0%) 0 (0%) 5Mi (0%) 0 (0%) 233d
kube-system tiller-deploy-5bd4488cc-lprhc 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2y32d
kube-system traefik-ingress-controller-b9xjt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2y330d
mongo mongodb-69b4497bfc-rdxws 100m (0%) 2 (12%) 8Gi (26%) 8Gi (26%) 164d
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 10425m (65%) 17400m (108%)
memory 29239Mi (93%) 33842Mi (107%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events: <none>
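
All four kubelet-reported conditions on this node are Unknown with the reason "Kubelet stopped posting node status", and memory requests already sit at 93% of allocatable (limits at 107%), which fits the theory that memory exhaustion took the kubelet down. Confirmation has to happen on the node itself; a typical check list, assuming a systemd-managed kubelet (adjust to however kubelet is run on these hosts):

# on the node itself (e.g. ssh 10.1.192.88):
systemctl status kubelet                                   # dead? crash-looping?
journalctl -u kubelet --since "2023-12-29 12:00" --no-pager | tail -n 200   # around the last RenewTime shown above
free -h                                                    # how much memory is actually left
dmesg -T | grep -iE 'out of memory|oom'                    # did the kernel OOM killer fire?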

#Check node 10.1.192.83

[root@redis ~]# kubectl describe node 10.1.192.83
Name: 10.1.192.83
Roles: node
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=10.1.192.83
kubernetes.io/os=linux
kubernetes.io/role=node
Annotations: flannel.alpha.coreos.com/backend-data: {"VtepMAC":"de:c9:b9:28:80:49"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.1.192.83
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 29 Jan 2021 14:59:05 +0800
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: 10.1.192.83
AcquireTime: <unset>
RenewTime: Fri, 29 Dec 2023 12:35:35 +0800
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Thu, 10 Nov 2022 20:11:00 +0800 Thu, 10 Nov 2022 20:11:00 +0800 FlannelIsUp Flannel is running on this node
MemoryPressure Unknown Fri, 29 Dec 2023 12:34:43 +0800 Fri, 29 Dec 2023 12:37:32 +0800 NodeStatusUnknown Kubelet stopped posting node status.
DiskPressure Unknown Fri, 29 Dec 2023 12:34:43 +0800 Fri, 29 Dec 2023 12:37:32 +0800 NodeStatusUnknown Kubelet stopped posting node status.
PIDPressure Unknown Fri, 29 Dec 2023 12:34:43 +0800 Fri, 29 Dec 2023 12:37:32 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Ready Unknown Fri, 29 Dec 2023 12:34:43 +0800 Fri, 29 Dec 2023 12:37:32 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Addresses:
InternalIP: 10.1.192.83
Hostname: 10.1.192.83
Capacity:
cpu: 16
ephemeral-storage: 102189Mi
hugepages-2Mi: 0
memory: 32888584Ki
pods: 110
Allocatable:
cpu: 16
ephemeral-storage: 96437639418
hugepages-2Mi: 0
memory: 32171784Ki
pods: 110
System Info:
Machine ID: 12accf0612accf0612accf0612accf06
System UUID: 7ba163a6-7ba1-63a6-7ba1-63a67ba163a6
Boot ID: bfdd4cff-5748-4a69-8544-08d36b1bfd9d
Kernel Version: 5.4.93-1.el7.elrepo.x86_64
OS Image: CentOS Linux 7 (Core)
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://19.3.8
Kubelet Version: v1.18.2
Kube-Proxy Version: v1.18.2
PodCIDR: 172.20.2.0/24
PodCIDRs: 172.20.2.0/24
Non-terminated Pods: (30 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
apm apm-front-deploy-6c48b7cff8-t4k4s 0 (0%) 0 (0%) 0 (0%) 0 (0%) 522d
cloudops aiops-front-8594696498-bkmjl 200m (1%) 200m (1%) 512Mi (1%) 512Mi (1%) 107d
cloudops ums-b47545846-48wgr 200m (1%) 200m (1%) 1Gi (3%) 1Gi (3%) 107d
cms-v2 base-auth-deploy-58c87b544-m2sc8 300m (1%) 300m (1%) 500Mi (1%) 500Mi (1%) 65d
cms-v2 cms-admin-deploy-7d9d49d8c6-g8g8x 300m (1%) 1 (6%) 2Gi (6%) 3Gi (9%) 23h
cms-v2 cms-agent-nginx-server-deploy-6df546d589-w8z2r 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 65d
cms-v2 cms-flinksql-client-deploy-6d6fbb7f56-tjzk5 300m (1%) 1 (6%) 1Gi (3%) 2Gi (6%) 2d22h
cms-v2 cms-hawkeye-pmon-779bb97b5d-9bjzc 0 (0%) 0 (0%) 0 (0%) 0 (0%) 3d17h
cms-v2 cms-hawkeye-schedule-567f97b699-p5b66 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9d
cms-v2 cms-hawkeye-transit-6df6f786d4-jhkz8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8d
cms-v2 cms-influxdb-search-deploy-b989648fd-fhjlh 300m (1%) 1 (6%) 2560Mi (8%) 3Gi (9%) 26h
cms-v2 cms-msg-deploy-696795cc67-bw7l6 500m (3%) 2 (12%) 3Gi (9%) 4Gi (13%) 20h
cms-v2 cms-openapi-deploy-787d994f74-kjmrw 300m (1%) 1 (6%) 2Gi (6%) 3Gi (9%) 2d22h
cms-v2 cms-schedule-deploy-fbff4b776-6n6xg 300m (1%) 1 (6%) 2Gi (6%) 3Gi (9%) 2d22h
cms-v2 cms-search-deploy-bc55c7897-xcpn6 300m (1%) 2 (12%) 3Gi (9%) 4Gi (13%) 3d20h
cms-v2 hana-client 50m (0%) 100m (0%) 256Mi (0%) 512Mi (1%) 121d
cms-v2 lp-agent-5dbbf59f65-z4kms 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2d19h
cms-v2 notice-service-deploy-657957cbbd-dqvbc 300m (1%) 300m (1%) 500Mi (1%) 500Mi (1%) 65d
cms-v2 yx-monitor-api-deploy-595fcf4b95-r2zjz 500m (3%) 500m (3%) 2Gi (6%) 2Gi (6%) 20h
cms-v2 yx-monitor-bingo-deploy-97ddf79dc-5rtph 300m (1%) 300m (1%) 500Mi (1%) 500Mi (1%) 51d
default redis-cluster-1 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
default redis-cluster-4 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
es kibana-f4994cf6f-5cj6x 500m (3%) 500m (3%) 1Gi (3%) 1Gi (3%) 562d
kube-system coredns-65dbdb44db-lbplv 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 2y334d
kube-system kube-flannel-ds-amd64-wfq58 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 2y334d
kube-system node-local-dns-kxxlt 25m (0%) 0 (0%) 5Mi (0%) 0 (0%) 233d
kube-system traefik-ingress-controller-5f54bc4dd7-vqdc7 0 (0%) 0 (0%) 0 (0%) 0 (0%) 522d
kube-system traefik-ingress-controller-947fk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2y330d
nacos-server nacos-0 500m (3%) 0 (0%) 2Gi (6%) 0 (0%) 170d
ycms zabbix-appliance-7d4965d897-lcc4r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 113d
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 7225m (45%) 13350m (83%)
memory 26657Mi (84%) 31616Mi (100%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events: <none>
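
The picture is the same here: memory requests at 84% of allocatable and limits at 100%. Requests only tell half the story; once the nodes are Ready again, actual usage can be compared with kubectl top (metrics-server is deployed in kube-system in this cluster), for example:

kubectl top node
kubectl top pod -A --sort-by=memory | head -n 20   # if the client supports --sort-by; lists the biggest memory consumers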

 

#Full details for the third node (10.1.192.89)
[root@redis ~]# kubectl describe node 10.1.192.89
Name: 10.1.192.89
Roles: node
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=10.1.192.89
kubernetes.io/os=linux
kubernetes.io/role=node
Annotations: flannel.alpha.coreos.com/backend-data: {"VtepMAC":"a2:92:89:95:da:7f"}
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: true
flannel.alpha.coreos.com/public-ip: 10.1.192.89
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 29 Jan 2021 14:59:06 +0800
Taints: node.kubernetes.io/unreachable:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: 10.1.192.89
AcquireTime: <unset>
RenewTime: Fri, 29 Dec 2023 12:35:32 +0800
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Sat, 16 Dec 2023 10:25:23 +0800 Sat, 16 Dec 2023 10:25:23 +0800 FlannelIsUp Flannel is running on this node
MemoryPressure Unknown Fri, 29 Dec 2023 12:35:19 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
DiskPressure Unknown Fri, 29 Dec 2023 12:35:19 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
PIDPressure Unknown Fri, 29 Dec 2023 12:35:19 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Ready Unknown Fri, 29 Dec 2023 12:35:19 +0800 Fri, 29 Dec 2023 12:37:31 +0800 NodeStatusUnknown Kubelet stopped posting node status.
Addresses:
InternalIP: 10.1.192.89
Hostname: 10.1.192.89
Capacity:
cpu: 16
ephemeral-storage: 102189Mi
hugepages-2Mi: 0
memory: 32888584Ki
pods: 110
Allocatable:
cpu: 16
ephemeral-storage: 96437639418
hugepages-2Mi: 0
memory: 32171784Ki
pods: 110
System Info:
Machine ID: 12accf0612accf0612accf0612accf06
System UUID: ebee242c-ebee-242c-ebee-242cebee242c
Boot ID: aea37a04-9d88-4a1a-8745-81624ca6752e
Kernel Version: 5.4.93-1.el7.elrepo.x86_64
OS Image: CentOS Linux 7 (Core)
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://19.3.8
Kubelet Version: v1.18.2
Kube-Proxy Version: v1.18.2
PodCIDR: 172.20.4.0/24
PodCIDRs: 172.20.4.0/24
Non-terminated Pods: (37 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
cloudops ansible-runner-7bb7fb678c-6swd5 500m (3%) 1 (6%) 256Mi (0%) 1Gi (3%) 107d
cms-v2 cms-agent-nginx-server-deploy-6df546d589-9mldp 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 37m
cms-v2 cms-agent-nginx-server-deploy-6df546d589-cdmzn 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 37m
cms-v2 cms-hawkeye-heartbeat-77f4c9d4c-ttsss 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9d
cms-v2 cms-hawkeye-heartbeat-77f4c9d4c-x6887 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m
cms-v2 cms-hawkeye-inspection-5d77bfb4d4-l8wlp 0 (0%) 0 (0%) 0 (0%) 0 (0%) 36m
cms-v2 cms-hawkeye-inspection-5d77bfb4d4-vbhgf 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9d
cms-v2 cms-hawkeye-mediator-68ff4c4c5d-d6qvc 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9d
cms-v2 cms-hawkeye-mediator-68ff4c4c5d-qj82r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m
cms-v2 cms-hawkeye-nodata-74888955b4-rvr8r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9d
cms-v2 cms-hawkeye-nodata-74888955b4-tls4g 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m
cms-v2 cms-hawkeye-pmon-gateway-794df59d46-7hnf2 500m (3%) 1 (6%) 1Gi (3%) 3Gi (9%) 37m
cms-v2 cms-hawkeye-pmon-gateway-794df59d46-ksw4c 500m (3%) 1 (6%) 1Gi (3%) 3Gi (9%) 3d17h
cms-v2 cms-mgr-deploy-748885b6c4-cmwjn 500m (3%) 1 (6%) 3Gi (9%) 4Gi (13%) 36m
cms-v2 cms-mgr-deploy-748885b6c4-djflt 500m (3%) 1 (6%) 3Gi (9%) 4Gi (13%) 2d22h
cms-v2 cms-msg-deploy-696795cc67-852bf 500m (3%) 2 (12%) 3Gi (9%) 4Gi (13%) 19h
cms-v2 cms-schema-deploy-597978d5f8-ldzcl 300m (1%) 1 (6%) 2Gi (6%) 3Gi (9%) 36m
cms-v2 cms-search-deploy-bc55c7897-fxnjk 300m (1%) 2 (12%) 3Gi (9%) 4Gi (13%) 3d19h
cms-v2 cms-v2-front-deploy-7d98844f7b-7cqwn 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 43h
cms-v2 cms-v2-front-deploy-7d98844f7b-blnhc 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 37m
cms-v2 cms-v2-metric-calculation-5b498f89f-48n2d 1 (6%) 1 (6%) 2Gi (6%) 2Gi (6%) 2d22h
cms-v2 cms-v2-metric-calculation-5b498f89f-74dt8 1 (6%) 1 (6%) 2Gi (6%) 2Gi (6%) 36m
cms-v2 cms-v2-mobile-front-deploy-85ccffc6b5-drtsn 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 36m
cms-v2 cms-v2-mobile-front-deploy-85ccffc6b5-kwtrl 50m (0%) 50m (0%) 200Mi (0%) 200Mi (0%) 44d
cms-v2 cms-v2-rule-detection-6bc778c9bf-fw6h2 1 (6%) 1 (6%) 2Gi (6%) 2Gi (6%) 2d22h
cms-v2 cms-v2-rule-detection-taskmanager-34-1 2 (12%) 2 (12%) 2Gi (6%) 2Gi (6%) 4h38m
default redis-cluster-0 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
default redis-cluster-3 900m (5%) 900m (5%) 1Gi (3%) 1Gi (3%) 62d
kube-system kube-flannel-ds-amd64-7k9xv 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 2y334d
kube-system metrics-server-6df49bbd5f-8vmcz 0 (0%) 0 (0%) 0 (0%) 0 (0%) 36m
kube-system metrics-server-6df49bbd5f-smn2s 0 (0%) 0 (0%) 0 (0%) 0 (0%) 522d
kube-system node-local-dns-srvtm 25m (0%) 0 (0%) 5Mi (0%) 0 (0%) 232d
kube-system traefik-ingress-controller-d6xtd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2y330d
ycms grafana-fd448b6c5-fhtwg 200m (1%) 500m (3%) 256Mi (0%) 1Gi (3%) 522d
ycms grafana-fd448b6c5-hs4xh 200m (1%) 500m (3%) 256Mi (0%) 1Gi (3%) 37m
ycms grafana-nginx-deploy-6585576d95-9tgms 100m (0%) 100m (0%) 256Mi (0%) 256Mi (0%) 37m
ycms grafana-nginx-deploy-6585576d95-rphcm 100m (0%) 100m (0%) 256Mi (0%) 256Mi (0%) 522d
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 11425m (71%) 18400m (114%)
memory 29159Mi (92%) 40674Mi (129%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events: <none>
[root@redis ~]#
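
Given the evidence (kubelet stopped posting status on all three workers, memory requests between 84% and 93% of allocatable, memory limits up to 129%), the fix is to free memory on the nodes and bring the kubelet back rather than keep fighting the taints. A plausible recovery sequence, assuming a systemd-managed kubelet and SSH access to the nodes:

# on each affected node (10.1.192.83 / .88 / .89):
systemctl restart kubelet
systemctl is-active kubelet

# back on the control machine: the nodes should return to Ready, and the
# node controller removes the unreachable taints by itself
kubectl get node -w
kubectl describe node 10.1.192.89 | grep Taint
kubectl get pod -n cms-v2 | grep -Ev 'Running|Completed'   # Pending/Terminating pods should clear out

Longer term, the memory overcommit shown above (limits at 107–129% of allocatable) is worth revisiting so that a single node cannot be pushed into OOM again.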



For reference, the built-in node taints and what they mean:

node.kubernetes.io/not-ready: the node is not ready. Corresponds to the NodeCondition Ready being "False".
node.kubernetes.io/unreachable: the node is unreachable from the node controller. Corresponds to the NodeCondition Ready being "Unknown".
node.kubernetes.io/out-of-disk: the node is out of disk space.
node.kubernetes.io/memory-pressure: the node is under memory pressure.
node.kubernetes.io/disk-pressure: the node is under disk pressure.
node.kubernetes.io/network-unavailable: the node's network is unavailable.
node.kubernetes.io/unschedulable: the node is unschedulable.
node.cloudprovider.kubernetes.io/uninitialized: when the kubelet is started with an external cloud provider, this taint is set to mark the node as unusable; once a controller from the cloud-controller-manager has initialized the node, the kubelet removes it.
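
Taints such as not-ready and unreachable are applied automatically by the node lifecycle controller, which is why deleting them by hand earlier did not stick. Related to this, the DefaultTolerationSeconds admission plugin by default gives every pod a 300-second NoExecute toleration for not-ready and unreachable, which is why pods on a dead node are only marked Terminating and replaced after a delay rather than immediately. The tolerations a pod actually carries can be inspected with, for example:

kubectl get pod -n cms-v2 cms-v2-front-deploy-7d98844f7b-blnhc -o jsonpath='{.spec.tolerations}'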