阿里云ECS 1master+3node添加新节点报错

发布时间 2023-09-16 10:50:26作者: 青空如璃

master:

k8s的token默认有效期只有24小时,如果后续还有节点需要加入集群,需要重新创建token:

1、创建:kubeadm token create

2、查看:kubeadm token list

3、获取ca证书sha256编码hash值: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

4、 root@k8s-master:/etc/kubernetes/pki# kubeadm token create --print-join-command
kubeadm join 172.21.158.209:6443 --token f7wkffw79n8r32 --discovery-token-ca-cert-hash sha256:6d6afbf84fbe4a67979e044e7e4c3b77024c16b2a7792dedb89461109693d591

或kubeadm join --token f7wkffw79n8r32 --discovery-token-ca-cert-hash sha256:6d6afbf84fbe4a67979e044e7e4c3b77024c16b2a7792dedb89461109693d591 --skip-preflight-checks
(注意:kubeadm join 仍需指定 apiserver 地址,如 172.21.158.209:6443;且 --skip-preflight-checks 在新版本中已被移除,可改用 --ignore-preflight-errors=all)


主kubelet版本

root@k8s-master:/etc/kubernetes/pki# kubelet
I0916 10:12:48.525251 22767 server.go:440] "Kubelet version" kubeletVersion="v1.21.4"
I0916 10:12:48.525437 22767 server.go:573] "Standalone mode, no API client"
I0916 10:12:48.525503 22767 server.go:629] "Failed to get the kubelet's cgroup. Kubelet system container metrics may be missing." err="cpu and memory cgroup hierarchy not unified. cpu: /user.slice, memory: /user.slice/user-1000.slice/session-324754.scope"
I0916 10:12:48.611048 22767 server.go:488] "No api server defined - no events will be sent to API server"
I0916 10:12:48.611070 22767 server.go:660] "--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /"
I0916 10:12:48.611224 22767 container_manager_linux.go:278] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I0916 10:12:48.611281 22767 container_manager_linux.go:283] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: ContainerRuntime:docker CgroupsPerQOS:true CgroupRoot:/ CgroupDriver:cgroupfs KubeletRootDir:/var/lib/kubelet ProtectKernelDefaults:false NodeAllocatableConfig:{KubeReservedCgroupName: SystemReservedCgroupName: ReservedSystemCPUs: EnforceNodeAllocatable:map[pods:{}] KubeReserved:map[] SystemReserved:map[] HardEvictionThresholds:[{Signal:memory.available Operator:LessThan Value:{Quantity:100Mi Percentage:0} GracePeriod:0s MinReclaim:<nil>} {Signal:nodefs.available Operator:LessThan Value:{Quantity:<nil> Percentage:0.1} GracePeriod:0s MinReclaim:<nil>} {Signal:nodefs.inodesFree Operator:LessThan Value:{Quantity:<nil> Percentage:0.05} GracePeriod:0s MinReclaim:<nil>} {Signal:imagefs.available Operator:LessThan Value:{Quantity:<nil> Percentage:0.15} GracePeriod:0s MinReclaim:<nil>}]} QOSReserved:map[] ExperimentalCPUManagerPolicy:none ExperimentalTopologyManagerScope:container ExperimentalCPUManagerReconcilePeriod:10s ExperimentalMemoryManagerPolicy:None ExperimentalMemoryManagerReservedMemory:[] ExperimentalPodPidsLimit:-1 EnforceCPULimits:true CPUCFSQuotaPeriod:100ms ExperimentalTopologyManagerPolicy:none}
I0916 10:12:48.611298 22767 topology_manager.go:120] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
I0916 10:12:48.611308 22767 container_manager_linux.go:314] "Initializing Topology Manager" policy="none" scope="container"
I0916 10:12:48.611315 22767 container_manager_linux.go:319] "Creating device plugin manager" devicePluginEnabled=true
I0916 10:12:48.611365 22767 kubelet.go:307] "Using dockershim is deprecated, please consider using a full-fledged CRI implementation"
I0916 10:12:48.611389 22767 client.go:78] "Connecting to docker on the dockerEndpoint" endpoint="unix:///var/run/docker.sock"
I0916 10:12:48.611401 22767 client.go:97] "Start docker client with request timeout" timeout="2m0s"
I0916 10:12:48.618078 22767 docker_service.go:566] "Hairpin mode is set but kubenet is not enabled, falling back to HairpinVeth" hairpinMode=promiscuous-bridge
I0916 10:12:48.618096 22767 docker_service.go:242] "Hairpin mode is set" hairpinMode=hairpin-veth
I0916 10:12:48.624600 22767 docker_service.go:257] "Docker cri networking managed by the network plugin" networkPluginName="kubernetes.io/no-op"
I0916 10:12:48.633012 22767 docker_service.go:264] "Docker Info" dockerInfo=&{ID:24ST:4ALS:7GUL:D2VK:QU7X:3IHZ:S4UT:L7BX:MZ3V:A3P4:RNPQ:A2PV Containers:60 ContainersRunning:39 ContainersPaused:0 ContainersStopped:21 Images:39 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus:[] Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:[] Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:false KernelMemory:true KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6tables:true Debug:false NFd:278 OomKillDisable:true NGoroutines:239 SystemTime:2023-09-16T10:12:48.626001647+08:00 LoggingDriver:json-file CgroupDriver:systemd CgroupVersion:1 NEventsListener:0 KernelVersion:4.19.0-17-amd64 OperatingSystem:Debian GNU/Linux 10 (buster) OSVersion:10 OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:0xc000a76070 NCPU:8 MemTotal:66480222208 GenericResources:[] DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:k8s-master Labels:[] ExperimentalBuild:false ServerVersion:20.10.8 ClusterStore: ClusterAdvertise: Runtimes:map[io.containerd.runc.v2:{Path:runc Args:[] Shim:<nil>} io.containerd.runtime.v1.linux:{Path:runc Args:[] Shim:<nil>} runc:{Path:runc Args:[] Shim:<nil>}] DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:[] Nodes:0 Managers:0 Cluster:<nil> Warnings:[]} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:e25210fe30a0a703442421b0f60afac609f950a3 Expected:e25210fe30a0a703442421b0f60afac609f950a3} RuncCommit:{ID:v1.0.1-0-g4144b63 Expected:v1.0.1-0-g4144b63} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor 
name=seccomp,profile=default] ProductLicense: DefaultAddressPools:[] Warnings:[WARNING: No swap limit support]}
E0916 10:12:48.633041 22767 server.go:292] "Failed to run kubelet" err="failed to run Kubelet: misconfiguration: kubelet cgroup driver: \"cgroupfs\" is different from docker cgroup driver: \"systemd\""

新节点:

因master、node节点kubelet等版本不一致,导致加入失败
root@k8s-node5:/etc/kubernetes# cat /etc/issue
Debian GNU/Linux 11 \n \l
root@k8s-node5:/etc/kubernetes# kubeadm reset
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0916 10:02:27.636486 798132 reset.go:101] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get node registration: failed to get node name from kubelet config: open /etc/kubernetes/kubelet.conf: no such file or directory
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: n
error execution phase preflight: aborted reset operation
To see the stack trace of this error execute with --v=5 or higher
root@k8s-node5:/etc/kubernetes# apt remove kubelet kubeadm kubectl
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done


root@k8s-node5:/etc/kubernetes# apt list kubeadm -a
Listing... Done
kubeadm/kubernetes-xenial 1.25.4-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.25.3-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.25.2-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.25.1-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.25.0-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.8-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.7-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.6-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.5-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.4-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.3-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.2-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.1-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.24.0-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.14-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.13-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.12-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.11-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.10-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.9-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.8-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.7-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.6-00 amd64 [residual-config]
kubeadm/kubernetes-xenial,now 1.23.5-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.4-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.3-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.2-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.1-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.23.0-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.16-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.15-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.14-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.13-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.12-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.11-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.10-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.9-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.8-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.7-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.6-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.5-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.4-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.3-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.2-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.1-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.22.0-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.14-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.13-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.12-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.11-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.10-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.9-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.8-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.7-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.6-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.5-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.4-00 amd64 [residual-config]
kubeadm/kubernetes-xenial 1.21.3-00 amd64 [residual-config]

root@k8s-node5:/etc/kubernetes# apt install kubelet=1.21.4-00 kubectl=1.21.4-00 kubeadm=1.21.4-00
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
The following NEW packages will be installed:
kubeadm kubectl kubelet
0 upgraded, 3 newly installed, 0 to remove and 92 not upgraded.
Need to get 0 B/36.3 MB of archives.
After this operation, 209 MB of additional disk space will be used.
Selecting previously unselected package kubelet.
(Reading database ... 45678 files and directories currently installed.)
Preparing to unpack .../kubelet_1.21.4-00_amd64.deb ...
Unpacking kubelet (1.21.4-00) ...
Selecting previously unselected package kubectl.
Preparing to unpack .../kubectl_1.21.4-00_amd64.deb ...
Unpacking kubectl (1.21.4-00) ...
Selecting previously unselected package kubeadm.
Preparing to unpack .../kubeadm_1.21.4-00_amd64.deb ...
Unpacking kubeadm (1.21.4-00) ...
Setting up kubectl (1.21.4-00) ...
Setting up kubelet (1.21.4-00) ...
Setting up kubeadm (1.21.4-00) ...
root@k8s-node5:/etc/kubernetes# kubeadm join 172.21.158.209:6443 --token ffaq.wilf1egpvm2t6fho --discovery-token-ca-cert-hash sha256:6d6afbf84fbe4a67769e044e7e4c3b77024c16b2a7792dedb89461109693d591
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
root@k8s-node5:/etc/kubernetes#

 

master:

 

root@k8s-master:/etc/kubernetes/pki# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane,master 2y17d v1.21.4
k8s-node1 Ready <none> 39d v1.21.4
k8s-node2 Ready <none> 2y17d v1.21.4
k8s-node3 Ready <none> 715d v1.21.4
k8s-node5 Ready <none> 28m v1.21.4
root@k8s-master:/etc/kubernetes/pki#