k8s installation based on containerd

Published 2023-10-13 17:45:47 | Author: Me-lihu


yum update -y   # upgrade the system (master and worker nodes)
1. Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld

2. Disable SELinux: (master and worker nodes)
sed -i 's/enforcing/disabled/' /etc/selinux/config   # permanent
setenforce 0   # temporary

3. Disable swap: (master and worker nodes)
swapoff -a   # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab   # permanent

4. Set the hostname (master and worker nodes; the names must match the /etc/hosts entries below)
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2

5. Add hosts entries (master and worker nodes)
cat >> /etc/hosts << EOF
10.124.191.100 k8s-master
10.124.191.101 k8s-node1
10.124.191.102 k8s-node2
EOF

6. Synchronize the time: (master and worker nodes)
yum install ntpdate -y
ntpdate time.windows.com

7. Pass bridged IPv4 traffic to the iptables chains (master and worker nodes)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl --system

8. Install the ipvs modules (master and worker nodes)
yum install ipset ipvsadm -y
modprobe br_netfilter
#### kernel 3.10 ####
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4   # on kernel 5.4 use nf_conntrack instead
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
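On systemd-managed hosts the same modules can alternatively be made persistent via modules-load.d (a sketch using the same mechanism as the containerd.conf file in step 10; on kernel 3.10 replace nf_conntrack with nf_conntrack_ipv4):
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load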

9. Remove Docker (master and worker nodes)
yum -y remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-ce-cli \
docker-engine
rpm -e docker-buildx-plugin-0.11.2-1.el7.x86_64
rpm -e docker-compose-plugin-2.21.0-1.el7.x86_64

10. Load the kernel modules (master and worker nodes)
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
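Confirm both modules are loaded:
lsmod | grep -e overlay -e br_netfilter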

11. Set kernel parameters (master and worker nodes)
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system   # apply the kernel parameters
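To confirm the values were applied (the net.bridge.* keys require the br_netfilter module loaded in steps 8 and 10), they can be read back directly:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward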

12. Configure the yum repository and install containerd (master and worker nodes)
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install containerd.io

13. Configure containerd (master and worker nodes)
containerd ships without a config file by default; as with k8s, a default configuration can be generated first:
containerd config default > /etc/containerd/config.toml
Configure the systemd cgroup driver:
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
grep 'SystemdCgroup = true' -B 7 /etc/containerd/config.toml
Use a domestic mirror for the sandbox (pause) image:
sed -i 's#sandbox_image = "registry.k8s.io/pause:3.6"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.7"#g' /etc/containerd/config.toml
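And verify that the sandbox image was replaced:
grep sandbox_image /etc/containerd/config.toml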

14. Enable at boot and start the service (master and worker nodes)
systemctl daemon-reload
systemctl enable --now containerd
systemctl start containerd
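Verify the daemon is up before continuing:
systemctl status containerd --no-pager
ctr version   # client and server versions; confirms the containerd socket is reachable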

15. Add the Kubernetes yum repository (master and worker nodes)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

16. Install kubeadm (master and worker nodes)
yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2
systemctl enable --now kubelet
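Check that the installed versions match (kubelet will keep restarting until kubeadm init/join has run, which is expected):
kubeadm version -o short
kubelet --version
kubectl version --client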

17. Set up crictl (master and worker nodes)
When using a CRI other than Docker, images are managed with crictl, the counterpart of the docker CLI.
containerd only applies registry mirrors to images pulled through the CRI; in other words, mirrors take effect only for pulls made via crictl or by Kubernetes itself, not for pulls made with ctr. crictl is the image management command used inside k8s.
cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
EOF
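With this in place crictl works much like the docker CLI, for example:
crictl info     # confirms crictl can reach containerd through the CRI socket
crictl images   # list images pulled through the CRI
crictl ps -a    # list all CRI containers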

18. Run on the master (master only)
kubeadm init \
--apiserver-advertise-address=10.124.191.100 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.28.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
--apiserver-advertise-address   the address the API server advertises to the cluster
--image-repository              the default registry k8s.gcr.io cannot be reached from inside China, so the Aliyun mirror registry is specified here
--kubernetes-version            the k8s version, matching the packages installed above
--service-cidr                  the cluster-internal virtual network, the unified entry point for accessing Pods
--pod-network-cidr              the Pod network; must stay consistent with the CNI component YAML deployed below
The default token is valid for 24 hours; after it expires it can no longer be used and a new one has to be created, which can be generated directly with:
kubeadm token create --print-join-command
kubeadm config print init-defaults   # the CIDR addresses and the version can also be looked up with this command
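Optionally, the control-plane images can be pre-pulled before running init (same repository and version as above):
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.2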

19. Join the worker nodes (worker nodes only)
kubeadm join 10.124.191.100:6443 --token 9zelr8.5uz5sulfw9p1sqxd \
--discovery-token-ca-cert-hash sha256:b75561e11254c7d517efca90fee0d134b004572b5691dbb99b8643bb84cea259
If something goes wrong, clean up first and join again after resetting: kubeadm reset

20. Copy the kubeconfig that kubectl uses to authenticate against the cluster to its default path (master only)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
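Alternatively, for the root user it is enough to point KUBECONFIG at the admin config:
export KUBECONFIG=/etc/kubernetes/admin.conf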

21. Verify that the nodes have joined
kubectl get nodes

22. Command completion for kubectl and kubeadm; takes effect after re-login (master and worker nodes)
yum -y install bash-completion
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
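To get completion in the current shell without logging in again:
source <(kubectl completion bash)
source <(kubeadm completion bash)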

23. Change the kubelet working directory (master and worker nodes)
mkdir -p /data/k8s/kubelet   # create the new kubelet data directory
sed -i 's#KUBELET_EXTRA_ARGS=#KUBELET_EXTRA_ARGS="--root-dir=/data/k8s/kubelet"#g' /etc/sysconfig/kubelet && cat /etc/sysconfig/kubelet
sed -i 's#/var/lib/kubelet#/data/k8s/kubelet#g' /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf && cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf | grep /data/k8s/kubelet
sed -i 's#/var/lib/kubelet#/data/k8s/kubelet#g' /etc/kubernetes/kubelet.conf && cat /etc/kubernetes/kubelet.conf | grep /data/k8s/kubelet   # update the certificate paths in /etc/kubernetes/kubelet.conf
cp -r /var/lib/kubelet/* /data/k8s/kubelet/   # copy the existing data from /var/lib/kubelet to the new directory /data/k8s/kubelet
systemctl daemon-reload
systemctl restart kubelet   # restart kubelet
df -h | grep pods   # check the working path

24. Install the calico network plugin (master only)
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml --no-check-certificate
In calico.yaml, set CALICO_IPV4POOL_CIDR in the calico-node container env to the same value as --pod-network-cidr, and add IP_AUTODETECTION_METHOD (the interface name depends on the host, ens192 here):
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # newly added below
            - name: IP_AUTODETECTION_METHOD
              value: "interface=ens192"
kubectl apply -f calico.yaml
kubectl get pods -n kube-system
kubectl logs -n kube-system calico-node-4f4r7   # view the logs (pod name comes from the previous command)
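Once the calico-node and coredns pods are Running, the nodes should report Ready:
kubectl get nodes -o wide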

25. Change the kube-proxy mode to ipvs
kubectl edit -n kube-system cm kube-proxy
Change mode: "" to mode: "ipvs"
# delete the kube-proxy pods so that new ones are created automatically
kubectl get pod -n kube-system |grep kube-proxy |awk '{system("kubectl delete pod "$1" -n kube-system")}'
# flush the leftover iptables rules
iptables -t filter -F
iptables -t filter -X
iptables -t nat -F
iptables -t nat -X
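After the new kube-proxy pods start, the ipvs rules can be inspected with ipvsadm (installed in step 8):
ipvsadm -Ln   # lists the virtual servers created by kube-proxy in ipvs mode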

26. Test the cluster
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
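A minimal end-to-end check (a sketch assuming the nginx service created above; any node IP works, the master's is used here):
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://10.124.191.100:${NODE_PORT}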