kubeadm & dashboard Deployment Script

Published 2023-09-20 16:22:52 · Author: YhtWeirdo

kubeadm Deployment Script

1. Server Planning

Three servers, 4 vCPU / 8 GB RAM each

IP address      Role
192.168.10.5    Master
192.168.10.6    Node01
192.168.10.7    Node02

2. Installing Kubernetes

Preparation
Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
Disable SELinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
Disable swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
Define the environment IPs
cat  << EOF >  /root/huanjin.txt
declare -A MasterArray EtcdArray otherEtcd NodeArray AllNode Other
MasterArray=(['k8s-master']=192.168.10.5)
EtcdArray=(['etcd1']=192.168.10.5 ['etcd2']=192.168.10.6 ['etcd3']=192.168.10.7)
otherEtcd=(['etcd2']=192.168.10.6 ['etcd3']=192.168.10.7)
NodeArray=(['k8s-node01']=192.168.10.6 ['k8s-node02']=192.168.10.7)

AllNode=(['k8s-master']=192.168.10.5 ['k8s-node01']=192.168.10.6 ['k8s-node02']=192.168.10.7)
Other=(['k8s-node01']=192.168.10.6 ['k8s-node02']=192.168.10.7)
EOF
Load the variables into the current shell
source /root/huanjin.txt
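huanjin.txt must be re-sourced in every new shell; to load it automatically (an optional convenience, not in the original script), append it to root's .bashrc:

echo 'source /root/huanjin.txt' >> /root/.bashrc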
Password-less access to the servers via sshpass aliases
yum install sshpass -y
# These aliases are temporary and must be re-created in every new terminal (replace 'xxxxx' with your server password)
alias ssh='sshpass -p xxxxx ssh -o StrictHostKeyChecking=no'
alias scp='sshpass -p xxxxx scp -o StrictHostKeyChecking=no'
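If you prefer real key-based authentication over the temporary aliases, a minimal sketch (assuming root logins with the same password 'xxxxx' on every host):

# generate a key once, then push it to each server with sshpass
ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa
for ip in 192.168.10.5 192.168.10.6 192.168.10.7; do
    sshpass -p 'xxxxx' ssh-copy-id -o StrictHostKeyChecking=no root@$ip
done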
Set the hostnames
for i in ${!AllNode[@]};do 
	  echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
      ssh ${AllNode[$i]} "hostnamectl set-hostname $i"
done
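The firewall, SELinux, and swap steps above were only executed locally, but kubeadm needs them on every node. A sketch that repeats them on the other machines, assuming the Other array from huanjin.txt:

for i in ${!Other[@]};do
      echo -e "\033[0;31;1m--- $i ${Other[$i]} ------\033[0m"
      ssh ${Other[$i]} "systemctl stop firewalld && systemctl disable firewalld"
      ssh ${Other[$i]} "setenforce 0; sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config"
      ssh ${Other[$i]} "swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"
done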
Time synchronization
yum remove -y ntp && yum install -y chrony vim
cat <<EOF > /etc/chrony.conf
server ntp1.aliyun.com iburst
server s1b.time.edu.cn iburst
stratumweight 0
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
local stratum 10
keyfile /etc/chrony.keys
logdir /var/log/chrony
noclientlog
logchange 1
EOF


for i in ${!NodeArray[@]};do 
	  echo -e "\033[0;31;1m--- $i ${NodeArray[$i]} ------\033[0m"
      ssh ${NodeArray[$i]} "yum remove -y ntp && yum install -y chrony"
done
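The loop above only installs chrony on the nodes; the config still has to be distributed and the daemon started everywhere (an assumed completion of this step, not in the original):

# start chronyd locally, then push the config to the nodes and verify sources
systemctl enable chronyd --now
for i in ${!NodeArray[@]};do
      scp /etc/chrony.conf ${NodeArray[$i]}:/etc/chrony.conf
      ssh ${NodeArray[$i]} "systemctl enable chronyd --now && chronyc sources"
done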
System tuning
# Install base tools
for i in ${!AllNode[@]};do
    echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
    sleep 1
	ssh ${AllNode[$i]} 'yum install -y nfs-utils unzip curl lrzsz yum-utils iptables ipvsadm device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim ntpdate libseccomp libtool-ltdl telnet bind-utils bzip2 git gcc make libnftnl-devel libmnl-devel autoconf automake libtool bison flex libnetfilter_conntrack-devel libnetfilter_queue-devel libpcap-devel'
done

Working directory
mkdir /home/k8s && cd /home/k8s
cat <<EOF >  /home/k8s/k8s.conf
# Fix long-connection timeouts under IPVS (kernel default: 2 hours)
net.ipv4.tcp_keepalive_time = 600
# Keepalive probe interval (default: 75 seconds)
net.ipv4.tcp_keepalive_intvl = 30
# Number of keepalive probes before a connection is declared dead (default: 9)
net.ipv4.tcp_keepalive_probes = 10
# Disable IPv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
# How long before stale neighbor (ARP) entries are re-validated
net.ipv4.neigh.default.gc_stale_time = 120
# Use arp_announce / arp_ignore to avoid ARP flux problems
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
# Enable IP forwarding
net.ipv4.ip_forward = 1
# General TCP connection tuning
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Pass bridged traffic to the iptables chains (required by kube-proxy/CNI)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
#vm.swappiness = 0     # never use swap; only allowed when the system OOMs
#vm.overcommit_memory=1   # do not check whether physical memory is sufficient
#vm.panic_on_oom=0    # let the OOM killer run instead of panicking
#fs.inotify.max_user_instances=8192
#fs.inotify.max_user_watches=1048576
EOF

# Apply locally
cp k8s.conf /etc/sysctl.d/k8s.conf
sysctl -p /etc/sysctl.d/k8s.conf

# Distribute and apply on all nodes
for i in ${!AllNode[@]};do 
      echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
      scp /etc/sysctl.d/k8s.conf ${AllNode[$i]}:/etc/sysctl.d/k8s.conf
      ssh ${AllNode[$i]} "sysctl -p /etc/sysctl.d/k8s.conf"
done

Prerequisites for running kube-proxy in IPVS mode
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
On newer kernels (4.19+, where nf_conntrack_ipv4 was merged into nf_conntrack), the script above fails with "Module nf_conntrack_ipv4 not found"; use this variant instead:
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack


# Distribute and apply on all nodes
# (grepping for nf_conntrack also matches nf_conntrack_ipv4, so one check covers both old and new kernels)
for i in ${!AllNode[@]};do 
      echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
      scp /etc/sysconfig/modules/ipvs.modules ${AllNode[$i]}:/etc/sysconfig/modules/ipvs.modules
      ssh ${AllNode[$i]} "chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack"
done
Install Docker
for i in "${!AllNode[@]}";do
	echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
	ssh ${AllNode[$i]} "yum upgrade -y && yum install -y yum-utils && yum-config-manager --add-repo 
https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum makecache && yum install -y docker-ce docker-ce-cli containerd.io"
	ssh ${AllNode[$i]} "systemctl enable docker --now"
done
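kubeadm's documentation recommends the systemd cgroup driver when Docker is the runtime. A minimal sketch of /etc/docker/daemon.json (these settings are an assumption, not part of the original script):

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}
EOF
# distribute the file and restart Docker everywhere
for i in ${!AllNode[@]};do
      scp /etc/docker/daemon.json ${AllNode[$i]}:/etc/docker/daemon.json
      ssh ${AllNode[$i]} "systemctl daemon-reload && systemctl restart docker"
done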

If the docker-ce package cannot be found, add the repo first:
for i in "${!AllNode[@]}";do
	echo -e "\033[0;31;1m--- $i ${AllNode[$i]} ------\033[0m"
	ssh ${AllNode[$i]} "yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo 
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo"
	ssh ${AllNod[$i]} "systemctl enable docker --now"
done

Install the Kubernetes components

Docker is now enabled on every node; pin the Kubernetes packages to version 1.18.3.

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3
systemctl enable kubelet && systemctl start kubelet

for i in ${!NodeArray[@]};do 
      echo -e "\033[0;31;1m--- $i ${NodeArray[$i]} ------\033[0m"
      scp /etc/yum.repos.d/kubernetes.repo ${NodeArray[$i]}:/etc/yum.repos.d/kubernetes.repo
      ssh ${NodeArray[$i]} "yum install -y kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3 && systemctl enable kubelet && systemctl start kubelet"
done
Initialize the cluster on the master node
kubeadm init --kubernetes-version=1.18.3 --apiserver-advertise-address=192.168.10.5 --image-repository registry.aliyuncs.com/google_containers --service-cidr=10.10.0.0/16 --pod-network-cidr=10.244.0.0/16
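If init stalls while pulling images, you can cancel it, pre-pull from the mirror, and run init again (kubeadm's standard image-pull subcommand):

kubeadm config images pull --kubernetes-version v1.18.3 --image-repository registry.aliyuncs.com/google_containers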

If initialization succeeds, save the kubeadm join command printed at the end of the output.


mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

Allow scheduling onto the master, then check node and pod status

kubectl taint node k8s-master node-role.kubernetes.io/master-
kubectl get node
kubectl get pod --all-namespaces
Install flannel on the master node
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Check the pods again
kubectl get pod --all-namespaces

After flannel is installed, copy its config to the node machines; otherwise nodes will come up in the wrong state after joining.
# Distribute
for i in ${!NodeArray[@]};do 
      echo -e "\033[0;31;1m--- $i ${NodeArray[$i]} ------\033[0m"
      scp -r /etc/cni ${NodeArray[$i]}:/etc
      scp -r /run/flannel ${NodeArray[$i]}:/run
done

Edit the kube-proxy config and set mode to ipvs

kubectl edit cm kube-proxy -n kube-system
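In the editor, locate the mode field inside data.config.conf and change it from the empty default (which means iptables) to ipvs; the relevant fragment should end up as:

kind: KubeProxyConfiguration
mode: "ipvs"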
Delete the current kube-proxy pods so they restart:
kubectl get pod -n kube-system | grep kube-proxy |awk '{system("kubectl delete pod "$1" -n kube-system")}'
After deletion, the kube-proxy pods are recreated automatically in IPVS mode.
Note: one kube-proxy pod is deleted and recreated per cluster node, so kube-proxy restarts on the master and on every worker.
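To confirm the switch took effect (a verification step not in the original), dump the IPVS rule table with the ipvsadm tool installed earlier and check the kube-proxy logs:

ipvsadm -Ln
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs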
Install the dashboard on the master node
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
kubectl apply -f recommended.yaml

Create an administrator
Create the account:
cat > dashboard-admin.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard
EOF
kubectl create -f dashboard-admin.yaml

Grant the account cluster-wide permissions:
cat > dashboard-admin-bind-cluster-role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard
EOF
kubectl create -f dashboard-admin-bind-cluster-role.yaml

Look up the service's nodePort:
kubectl get svc -n kubernetes-dashboard
Once the pods are up, open https://<node-ip>:<nodePort> to reach the UI.
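The HTTPS login page asks for a token. In Kubernetes 1.18 a token secret is created automatically for the ServiceAccount, so it can be read like this (a common lookup, not part of the original post):

kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')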
Accessing the Dashboard over HTTP without a login
Copy over recommended-80.yaml (an HTTP-exposed variant of the manifest, not reproduced here) and apply it:
kubectl apply -f recommended-80.yaml

Login-free access
cat > default-user-bind-cluster-role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
EOF
kubectl create -f default-user-bind-cluster-role.yaml

Look up the nodePort with the command below, and note the node IP; both will be reused in the nginx configuration later.
kubectl get svc -n kubernetes-dashboard


Joining the worker nodes

Run the join command saved from the master's init output:

kubeadm join 192.168.10.5:6443 --token 0566f9.9ddqbp4bmyj48wpr \
    --discovery-token-ca-cert-hash sha256:07250e7c3715f902879d1c64f147149d7ae9b8291e906f1c4912816e4d4d75c0

If you have lost the token, list the existing ones:

kubeadm token list

If the token has expired, generate a new one:

kubeadm token create

Compute the CA certificate hash:

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
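A shortcut that avoids handling the token and hash separately: kubeadm can print a complete, ready-to-run join command:

kubeadm token create --print-join-command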

Join the cluster from each worker node:

kubeadm join 192.168.10.5:6443 --token p99z9z.rpxirqvj9bq222ru --discovery-token-ca-cert-hash sha256:bd153224474c1506d54e1f2724d09bcce5ef3bc58197b032b9d26233732b9111
After it succeeds, verify on the master:
kubectl get nodes
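Nodes turn Ready once their flannel pod is Running; a quick final check (assumed verification steps, not in the original):

kubectl get nodes -o wide
kubectl get pods -n kube-system -o wide | grep -E 'flannel|kube-proxy'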