部署k8s-1.28.0

发布时间 2023-08-18 21:07:45作者: 1769987233

===============================================================

#centos8及其以上不用操作!!!

# Disable swap on all nodes: turn it off now, and comment out swap entries
# in /etc/fstab so it stays off after reboot.
# (The original pattern '/fstab/d' matched nothing in /etc/fstab — swap lines
# contain the word "swap", not "fstab" — so swap came back on every reboot.)
swapoff -a ; sed -i '/swap/s/^/#/' /etc/fstab

#升级系统内核

#导入elrepo gpg key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

#安装elrepo YUM源仓库
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

#安装kernel-ml版本,ml为主线最新版本(mainline),lt为长期维护版本(long-term)
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64

#设置grub2默认引导为0
grub2-set-default 0

#重新生成grub2引导文件
grub2-mkconfig -o /boot/grub2/grub.cfg

#更新后,需要重启,使用升级的内核生效。
reboot

#重启后,需要验证内核是否为更新对应的版本
uname -r

===============================================================

#添加网桥过滤及内核转发配置文件
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

#加载br_netfilter模块
modprobe br_netfilter

#使之生效
sysctl -p /etc/sysctl.d/k8s.conf 

#查看是否加载
lsmod | grep br_netfilter
[root@hecs-83607 ~]# lsmod | grep br_netfilter
br_netfilter           32768  0
bridge                315392  1 br_netfilter

===============================================================

#安装ipset及ipvsadm
yum -y install ipset ipvsadm

# Configure which ipvs kernel modules to load (kube-proxy in ipvs mode
# needs ip_vs* plus nf_conntrack). A shebang is added so the generated
# file is a proper executable script instead of relying on shell fallback.

cat > /etc/sysconfig/modules/ipvs.module <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- nf_conntrack
EOF

# Make it executable, run it once now, then verify the modules loaded.
chmod 755 /etc/sysconfig/modules/ipvs.module && /etc/sysconfig/modules/ipvs.module

lsmod | grep -e ip_vs -e nf_conntrack

[root@hecs-83607 ~]# lsmod | grep -e ip_vs -e nf_conntrack
nf_conntrack_netlink    57344  0
nfnetlink              20480  4 nft_compat,nf_conntrack_netlink,nf_tables
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 188416  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  5 xt_conntrack,nf_nat,nf_conntrack_netlink,xt_MASQUERADE,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  4 nf_conntrack,nf_nat,nf_tables,ip_vs
[root@hecs-83607 ~]# 


===============================================================

#安装一些必要工具
yum install -y yum-utils device-mapper-persistent-data lvm2

#配置docker镜像源
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo


#查看所有的可用版本
yum list docker-ce --showduplicates | sort -r

#安装旧版本 yum install docker-ce-cli-19.03.15-3.el7  docker-ce-19.03.15-3.el7

#安装源里最新版本
yum install docker-ce

#镜像加速器
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
   "registry-mirrors": ["https://uwtwp6l0.mirror.aliyuncs.com"],
    "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Start the Docker service and enable it at boot.
# 'systemctl enable --now' both enables and starts the unit, so a separate
# 'systemctl start docker' is redundant. (The header line was also missing
# its '#' and would break the file if executed as a script.)
systemctl enable --now docker

===============================================================

#到下面的链接下载最新版cri-docker
https://github.com/Mirantis/cri-dockerd/tags

#所有节点 都安装 cri-dockerd
#拷贝二进制文件
tar -xf cri-dockerd-0.2.1.amd64.tgz 
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd 


# 配置启动文件
cat <<'EOF'> /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

StartLimitBurst=3

StartLimitInterval=60s

LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

#生成socket 文件
cat <<'EOF'> /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF


#启动cri-docker并设置开机自动启动
systemctl daemon-reload ; systemctl enable cri-docker --now
systemctl is-active cri-docker

[root@hecs-83607 ~]# 
[root@hecs-83607 ~]# ll /var/run/cri-dockerd.sock
srwxr-xr-x 1 root root 0 Aug 18 19:25 /var/run/cri-dockerd.sock

#/var/run/cri-dockerd.sock 在下面 kubeadm init 初始化的时候用到

===============================================================

#阿里云YUM源【国内主机】
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all && yum makecache


#所有节点均可安装
#查看所有的可用版本
yum list  kubeadm  kubelet kubectl --showduplicates | sort -r | head



#默认安装的版本就是源里最新版1.28.X,当然也可以指定版本安装,如 yum install kubelet-1.28.0 kubeadm-1.28.0 kubectl-1.28.0
yum install  kubeadm  kubelet kubectl

#安装后查看版本
kubeadm version

#设置kubelet为开机自启动即可,由于没有生成配置文件,集群初始化后自动启动
systemctl enable kubelet
systemctl is-active kubelet



#为了实现docker使用的cgroupdriver与kubelet使用的cgroupdriver的一致性,建议修改如下文件内容。
cat <<EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF


===============================================================


#初始化集群(仅在master节点执行)
kubeadm init \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.28.0 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket /var/run/cri-dockerd.sock

初始化成功
-----------------------
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.117:6443 --token 0l8ha3.ms7c5l3yknuv497x \
        --discovery-token-ca-cert-hash sha256:dbfb4b491b35a5392ea2939d7b74e5edbbb8d28b23c106ba46a6b78d999198b8 
[root@hecs-83607 ~]# 
------------------------

#worker节点加入集群时,需要在 kubeadm join 命令后面添加下面这一段
--cri-socket unix:///var/run/cri-dockerd.sock

kubeadm join 192.168.0.117:6443 --token 0l8ha3.ms7c5l3yknuv497x \
--discovery-token-ca-cert-hash sha256:dbfb4b491b35a5392ea2939d7b74e5edbbb8d28b23c106ba46a6b78d999198b8 \
--cri-socket unix:///var/run/cri-dockerd.sock


#打印 init/join/reset 的默认配置yaml
kubeadm config print init-defaults 
kubeadm config print join-defaults 
kubeadm config print reset-defaults 

===============================================================

#配置 calico 网络插件
#下载 calico 插件:
wget https://docs.projectcalico.org/manifests/calico.yaml --no-check-certificate

老地址:https://docs.projectcalico.org/manifests/calico.yaml
新地址:https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml


#修改 网段
vim calico.yaml
-----

 # no effect. This should fall within `--cluster-cidr`.
 - name: CALICO_IPV4POOL_CIDR
   value: "10.244.0.0/16"
 # Disable file logging so `kubectl logs` works.

------

[root@hecs-83607 ~]# cat calico.yaml |grep image |grep docker.io
          image: docker.io/calico/cni:v3.26.1
          image: docker.io/calico/cni:v3.26.1
          image: docker.io/calico/node:v3.26.1
          image: docker.io/calico/node:v3.26.1
          image: docker.io/calico/kube-controllers:v3.26.1

# Pre-pull the calico images referenced by calico.yaml.
# Deduplicated: the manifest lists cni and node twice each, but one
# pull per image is enough. Expansions are quoted (ShellCheck SC2068/SC2086).
images=(
docker.io/calico/cni:v3.26.1
docker.io/calico/node:v3.26.1
docker.io/calico/kube-controllers:v3.26.1
)
for i in "${images[@]}"
do
	docker pull "$i"
done

# ${array[*]} 加引号,表示数组元素的字符串形式。不加引号,表示数组。
# ${array[@]} 加引号,不加引号,均表示数组。 

kubectl apply -f calico.yaml

kubectl get pod -n kube-system

===============================================================

#去掉master 节点的污点
[root@hecs-83607 ~]# kubectl get node
NAME         STATUS   ROLES           AGE   VERSION
hecs-83607   Ready    control-plane   42m   v1.28.0

#查看污点 
[root@hecs-83607 ~]# kubectl describe node hecs-83607 | grep -i taint
Taints:             node-role.kubernetes.io/control-plane:NoSchedule

#去除污点
kubectl taint node hecs-83607 node-role.kubernetes.io/control-plane:NoSchedule-

===============================================================
 
#部署dashborad
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

vim recommended.yaml

----
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001 # 添加
  type: NodePort # 添加
  selector:
    k8s-app: kubernetes-dashboard
----

kubectl apply -f recommended.yaml

kubectl get pod -n kubernetes-dashboard -w

#创建用户:
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml

kubectl apply -f dashboard-user.yaml

#创建token 
kubectl -n kubernetes-dashboard create token admin-user

===============================================================

#--dry-run 已弃用,可以用 --dry-run=client 替换
kubectl create deployment nginx --image=nginx -o yaml --dry-run=client > nginx.yaml

kubectl apply -f nginx.yaml
watch -n1 kubectl get pod

kubectl get deploy nginx -o yaml  > nginx.yaml

===============================================================