Kubernetes Cluster v1.27.3

Published 2023-06-30 18:35:09 · Author: SkyRainmom

Base Environment

These steps must be performed on all three nodes; k8s-master is shown as the example.

Host           Components                                                                                                    IP               OS
k8s-master     docker, kube-apiserver, etcd, kube-scheduler, kube-controller-manager, kubelet, kube-proxy, coredns, calico   Net:10.10.20.10  CentOS 8 Stream
k8s-worker01   docker, kubelet, kube-proxy, calico                                                                           Net:10.10.20.20  CentOS 8 Stream
k8s-worker02   docker, kubelet, kube-proxy, calico                                                                           Net:10.10.20.30  CentOS 8 Stream

Hostname Configuration and /etc/hosts Mapping

#k8s-master
[root@localhost ~]# hostnamectl set-hostname k8s-master
[root@localhost ~]# bash
[root@k8s-master ~]# cat >>/etc/hosts<<EOF
10.10.20.10     k8s-master
10.10.20.20     k8s-worker01
10.10.20.30     k8s-worker02
EOF

#k8s-worker01
[root@localhost ~]# hostnamectl set-hostname k8s-worker01
[root@localhost ~]# bash
[root@k8s-worker01 ~]# cat >>/etc/hosts<<EOF
10.10.20.10     k8s-master
10.10.20.20     k8s-worker01
10.10.20.30     k8s-worker02
EOF

#k8s-worker02
[root@localhost ~]# hostnamectl set-hostname k8s-worker02
[root@localhost ~]# bash
[root@k8s-worker02 ~]# cat >>/etc/hosts<<EOF
10.10.20.10     k8s-master
10.10.20.20     k8s-worker01
10.10.20.30     k8s-worker02
EOF

SSH Key Authentication

#Generate the key pair on the master node, copy it to the other nodes, and test passwordless login
[root@k8s-master ~]# ssh-keygen
........(just press Enter at every prompt)
[root@k8s-master ~]# for i in 10 20 30; do ssh-copy-id 10.10.20.$i; done
.......(enter the password when prompted; output omitted)

#Copy the worker01 and worker02 keys to the master
[root@k8s-worker01 ~]# ssh-keygen
........(just press Enter at every prompt)
[root@k8s-worker01 ~]# ssh-copy-id k8s-master

[root@k8s-worker02 ~]# ssh-keygen
........(just press Enter at every prompt)
[root@k8s-worker02 ~]# ssh-copy-id k8s-master

#Verify
[root@k8s-master ~]# ssh k8s-worker01
[root@k8s-master ~]# ssh k8s-worker02
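
To avoid pressing Enter through every prompt, the key can also be generated non-interactively; a small optional variant (the empty passphrase is a convenience choice, not part of the original steps):

[root@k8s-master ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa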

Enable IP Routing and IPv4 Forwarding

#Add the bridge and kernel forwarding sysctl configuration file
[root@k8s-master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF

#Load the br_netfilter module and verify it is loaded
[root@k8s-master ~]# modprobe br_netfilter && lsmod | grep br_netfilter

br_netfilter           24576  0
bridge                290816  1 br_netfilter
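
modprobe only loads br_netfilter for the current boot; to keep it (and the overlay module used by the container runtime) loaded across reboots, a modules-load.d entry can be added. This is a small addition not in the original steps:

[root@k8s-master ~]# cat > /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF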

#Apply the bridge filtering and kernel forwarding settings
[root@k8s-master ~]# sysctl -p /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0

Configure the yum Repositories

#Use the Aliyun CentOS 8 mirrors, available at https://mirrors.aliyun.com

#Configure the CentOS 8 repos
[root@k8s-master ~]# mkdir /etc/yum.repos.d/Centos8
[root@k8s-master ~]# mv /etc/yum.repos.d/CentOS-Stream-* /etc/yum.repos.d/Centos8
[root@k8s-master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@k8s-master ~]# sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

#Configure the Kubernetes repo
[root@k8s-master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#Clean the yum cache and rebuild it
[root@k8s-master ~]# yum clean all

[root@k8s-master ~]# yum -y makecache
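
As an optional check that both the CentOS and Kubernetes repositories are now usable:

[root@k8s-master ~]# yum repolist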

Configure IPVS

Kubernetes Services support two proxy modes: one based on iptables and one based on IPVS. Of the two, IPVS performs better; to use the IPVS mode, the required kernel modules must be loaded manually.

#Install ipset and ipvsadm
[root@k8s-master ~]# dnf -y install ipset ipvsadm

#Add the modules that need to be loaded
[root@k8s-master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF


#Make the script executable, run it, and check that the modules are loaded
[root@k8s-master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 172032  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          172032  1 ip_vs
nf_defrag_ipv6         20480  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs
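
Note that loading the modules by itself does not switch kube-proxy to IPVS mode; once the cluster has been initialized (see the kubeadm section below), the mode also has to be set in the kube-proxy ConfigMap. A minimal sketch, assuming the default kubeadm-managed kube-proxy:

#set mode: "ipvs" in the config.conf data of the kube-proxy ConfigMap
[root@k8s-master ~]# kubectl -n kube-system edit configmap kube-proxy

#recreate the kube-proxy pods so they pick up the new mode
[root@k8s-master ~]# kubectl -n kube-system delete pod -l k8s-app=kube-proxy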

Disable Swap, the Firewall, and SELinux

#Permanently disable the swap partition by commenting out its entry in /etc/fstab (requires a reboot)
[root@k8s-master ~]# vi /etc/fstab 

#/dev/mapper/cs-swap     none                    swap    defaults        0 0

#Stop the firewall and disable it at boot
[root@k8s-master ~]# systemctl disable --now firewalld

#Permanently disable SELinux
[root@k8s-master ~]# vi /etc/selinux/config 
SELINUX=disabled
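
If you also want the changes to apply to the running system before the reboot below, swap and SELinux can be turned off immediately (optional):

[root@k8s-master ~]# swapoff -a
[root@k8s-master ~]# setenforce 0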

#Reboot the machine so the changes take effect
[root@k8s-master ~]# reboot

Verify the Base Configuration

#Verify that SELinux reports Disabled
[root@k8s-master ~]# getenforce
Disabled

#Verify that the swap partition is off
[root@k8s-master ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:          1.9Gi       160Mi       1.6Gi       8.0Mi       184Mi       1.6Gi
Swap:            0B          0B          0B

#Verify that the firewall is stopped
[root@k8s-master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)

Docker

Kubernetes is a container orchestrator and needs a container runtime underneath it; install Docker on all three nodes.

Static binary download: https://download.docker.com/linux/static/stable/x86_64/

Install Docker

[root@k8s-master ~]# wget https://download.docker.com/linux/static/stable/x86_64/docker-24.0.2.tgz

#Extract the archive
[root@k8s-master ~]# tar xf docker-24.0.2.tgz 

#Copy the binaries into place
[root@k8s-master ~]# cp docker/* /usr/bin/

Configure Docker Registry Mirrors

[root@k8s-master ~]# mkdir /etc/docker
[root@k8s-master ~]# vi /etc/docker/daemon.json 

{
        "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn",
                        "https://docker.m.daocloud.io",
                        "http://hub-mirrors.c.163.com"],
        "max-concurrent-downloads": 10,
        "log-driver": "json-file",
        "log-level": "warn",
        "data-root": "/var/lib/docker"

}

Configure the cgroup Driver

On Linux, control groups (cgroups) limit the resources allocated to processes. The official recommendation is to configure both the container runtime and the kubelet to use systemd (the init process, PID 1, on modern Linux) as the cgroup driver, which makes the system more stable.

#Add the following line to /etc/docker/daemon.json; don't forget the comma after the previous entry!
"exec-opts": ["native.cgroupdriver=systemd"]

[root@k8s-master ~]# vi /etc/docker/daemon.json 
[root@k8s-master ~]# cat /etc/docker/daemon.json 
{
        "exec-opts": ["native.cgroupdriver=systemd"],
        "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn",
                        "https://docker.m.daocloud.io",
                        "http://hub-mirrors.c.163.com"],
        "max-concurrent-downloads": 10,
        "log-driver": "json-file",
        "log-level": "warn",
        "data-root": "/var/lib/docker"
}
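
Because a stray or missing comma will stop dockerd from starting, it can be worth validating the JSON before the daemon is started; an optional check, assuming python3 is available:

[root@k8s-master ~]# python3 -m json.tool /etc/docker/daemon.json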

Configure the containerd and Docker Services

#containerd.service
[root@k8s-master ~]# vi /etc/systemd/system/containerd.service

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target


#docker.service
[root@k8s-master ~]# vi /etc/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
ExecStart=/usr/bin/dockerd --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target


#docker.socket
[root@k8s-master ~]# vi /etc/systemd/system/docker.socket 

[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

Start the Docker Services

[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable --now containerd.service && systemctl enable --now docker.service && systemctl enable --now docker.socket

Check the Version and Info

[root@k8s-master ~]# docker --version
[root@k8s-master ~]# docker info
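
With the daemon running, you can also confirm that the systemd cgroup driver configured in daemon.json was picked up; the command below should print systemd (optional check):

[root@k8s-master ~]# docker info --format '{{.CgroupDriver}}'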

cri-docker

Kubernetes 1.24 and later removed the built-in dockershim, so the kubelet can no longer talk to Docker directly; cri-dockerd must be installed to provide the CRI shim.

Install cri-dockerd

[root@k8s-master ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.3/cri-dockerd-0.3.3.amd64.tgz

#Extract the archive
[root@k8s-master ~]# tar xf cri-dockerd-0.3.3.amd64.tgz 

#Copy the binaries into place
[root@k8s-master ~]# cp cri-dockerd/* /usr/bin/
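
A quick check that the binary is in place and runs (optional):

[root@k8s-master ~]# cri-dockerd --version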

Configure the cri-docker Service

#cri-docker.service
[root@k8s-master cri-dockerd]# vi /usr/lib/systemd/system/cri-docker.service

[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target


#cri-docker.socket
[root@k8s-master ~]# vi /usr/lib/systemd/system/cri-docker.socket 

[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

Start the cri-docker Service

[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable --now cri-docker

Check the cri-docker Status

[root@k8s-master ~]# systemctl is-active cri-docker
active
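
The CRI socket that kubeadm will be pointed at should now exist (optional check):

[root@k8s-master ~]# ls -l /var/run/cri-dockerd.sock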

kubelet, kubeadm, kubectl

kubelet, kubeadm, and kubectl must be installed on all three nodes.

Overview:

kubelet is the node agent of Kubernetes; it runs on every node.

kubeadm is a tool for quickly bootstrapping a Kubernetes cluster; it provides the kubeadm init and kubeadm join commands and performs the actions needed to bring up a minimal viable cluster.

kubectl is the Kubernetes command-line tool; it is used to manage the cluster itself and to deploy containerized applications onto it.

Install kubelet, kubeadm, and kubectl

#--disableexcludes=kubernetes tells yum to ignore any exclude= directives defined for the kubernetes repository, so the kubelet/kubeadm/kubectl packages can be installed from it
[root@k8s-master ~]# yum -y install kubelet kubeadm kubectl --disableexcludes=kubernetes

#All three packages should end up at the same version (a version-pinned install is sketched below); the versions installed here are:
kubeadm-1.27.3-0.x86_64
kubectl-1.27.3-0.x86_64                            
kubelet-1.27.3-0.x86_64    
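
To guarantee this exact match regardless of what the repository currently considers latest, the versions can also be pinned explicitly; an alternative form of the install command above:

[root@k8s-master ~]# yum -y install kubelet-1.27.3-0 kubeadm-1.27.3-0 kubectl-1.27.3-0 --disableexcludes=kubernetes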

Enable kubelet at Boot

[root@k8s-master ~]# systemctl enable --now kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

#Before kubeadm init the kubelet cannot start successfully, but its state can still be checked; it keeps restarting while it waits for a configuration
[root@k8s-master ~]# systemctl is-active kubelet
activating
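
The restart loop behind the activating state can be observed in the journal if you want to confirm that the kubelet is simply waiting for its configuration (optional):

[root@k8s-master ~]# journalctl -u kubelet --no-pager | tail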

kubeadm Initialization

Check Available Versions

[root@k8s-master ~]# yum list --showduplicates kubeadm --disableexcludes=kubernetes
Last metadata expiration check: 11:54:21 ago on Tue 27 Jun 2023 10:00:07 PM CST.
Installed Packages
kubeadm.x86_64                                   1.27.3-0                                     @kubernetes
Available Packages
kubeadm.x86_64                                   1.6.0-0                                      kubernetes 
kubeadm.x86_64                                   1.6.1-0                                      kubernetes 
kubeadm.x86_64                                   1.6.2-0                                      kubernetes 
kubeadm.x86_64                                   1.6.3-0                                      kubernetes 
kubeadm.x86_64                                   1.6.4-0                                      kubernetes 
kubeadm.x86_64                                   1.6.5-0                                      kubernetes 
kubeadm.x86_64                                   1.6.6-0                                      kubernetes 
........(output truncated)

Start the Initialization (run on k8s-master only)

--image-repository registry.aliyuncs.com/google_containers: use the Aliyun image registry
--kubernetes-version=v1.27.3: specify the Kubernetes version
--pod-network-cidr=10.10.20.0/24: specify the pod network CIDR

--cri-socket unix:///var/run/cri-dockerd.sock: specify the container runtime socket; the default used to be dockershim.sock, but with cri-dockerd it points at cri-dockerd.sock (the same flags are reused in the pre-pull sketch below)
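
Optionally, the control-plane images can be pre-pulled with the same flags before running the actual init; a sketch, not part of the original steps:

[root@k8s-master ~]# kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.27.3 \
--cri-socket unix:///var/run/cri-dockerd.sock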

[root@k8s-master ~]# kubeadm init \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.27.3 \
--pod-network-cidr=10.10.20.0/24 \
--cri-socket unix:///var/run/cri-dockerd.sock

 .......(output truncated)
 [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!		#this line means initialization succeeded

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.20.10:6443 --token xyhm8m.yqt9blyv9xi1av51 \
	--discovery-token-ca-cert-hash sha256:b48272f1d50bd2166b77745a8bac0d3783bcaf64f696dea2a4aa582c266cb3db
#The output includes a token that can be used to join the worker nodes to the cluster
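
The bootstrap token expires after 24 hours by default; if it has expired before a worker joins, a fresh join command can be printed on the master:

[root@k8s-master ~]# kubeadm token create --print-join-command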

Create the kubeconfig File as Instructed by the Init Output

[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the Worker Nodes to the Cluster (run on k8s-worker01 and k8s-worker02)

#k8s-worker01:
[root@k8s-worker01 ~]# kubeadm join 10.10.20.10:6443 --token xyhm8m.yqt9blyv9xi1av51 \
--discovery-token-ca-cert-hash sha256:b48272f1d50bd2166b77745a8bac0d3783bcaf64f696dea2a4aa582c266cb3db \
--cri-socket unix:///var/run/cri-dockerd.sock

.....(output truncated)
This node has joined the cluster:				#this message at the end means the join succeeded
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
#As hinted, run kubectl get nodes on the control plane to see the cluster nodes

#k8s-worker02
[root@k8s-worker02 ~]# kubeadm join 10.10.20.10:6443 --token xyhm8m.yqt9blyv9xi1av51 \
--discovery-token-ca-cert-hash sha256:b48272f1d50bd2166b77745a8bac0d3783bcaf64f696dea2a4aa582c266cb3db \
--cri-socket unix:///var/run/cri-dockerd.sock

.......(output truncated)

Back on the Master, Check the Nodes (run on k8s-master only)

[root@k8s-master ~]# kubectl get nodes
NAME           STATUS     ROLES           AGE    VERSION
k8s-master     NotReady   control-plane   14m    v1.27.3
k8s-worker01   NotReady   <none>          5m1s   v1.27.3
k8s-worker02   NotReady   <none>          2m5s   v1.27.3
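
All nodes report NotReady because no pod network add-on has been installed yet; as listed in the component table at the top, Calico is the intended CNI. A minimal sketch of installing it from the upstream manifest (the Calico version/URL here is an assumption, and the default pool CIDR in the manifest may need to be aligned with the --pod-network-cidr used above):

[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml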