k8s 1.25 Installation

Published 2023-10-04 10:44:47 · Author: 烟雨楼台,行云流水

Environment Initialization

yum install bash-completion vim ntpdate iptables lrzsz epel-release -y && exec bash
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
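
To confirm the changes took effect (an optional sanity check):
getenforce                      # prints Permissive now; Disabled after a reboot
systemctl is-active firewalld   # should print inactive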

Install Docker

# Step 1: install the required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repo
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repo at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start the Docker service
sudo service docker start
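
service docker start only starts Docker for the current boot; to have it come back after a reboot, enabling the unit should work:
sudo systemctl enable docker
docker version   # confirm the client and daemon both respond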

  Passwordless SSH Between Hosts

[root@k8s-master ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Zs+V+wNPaXRiainTUzIReEzp/KpjdTVZ9o7zNwWMzFU root@k8s-master
The key's randomart image is:
+---[RSA 2048]----+
|           +oo .E|
|          . = . o|
|           * = .+|
|            O.Bo+|
|        S  .oX O.|
|       o oo.O.O o|
|          o=.O o.|
|          o ..o.o|
|         ..o  ..o|
+----[SHA256]-----+
[root@k8s-master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.50  k8s-master
192.168.10.51  k8s-node1
192.168.10.52  k8s-node2
[root@k8s-master ~]# ssh-copy-id k8s-node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-node1 (192.168.10.51)' can't be established.
ECDSA key fingerprint is SHA256:H9NvcSpsUXCcUziykpSN7WMrL/EomIaPP6/zJupGpUk.
ECDSA key fingerprint is MD5:22:b4:00:26:09:7f:fd:fa:a0:7c:e8:d4:4f:fd:38:0d.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-node1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-node1'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# ssh-copy-id k8s-node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-node2 (192.168.10.52)' can't be established.
ECDSA key fingerprint is SHA256:zzldrfyVbfqWMww99687af8UEtUh+GCaM8rlUJmYhtE.
ECDSA key fingerprint is MD5:c9:ec:f6:d3:60:fa:b8:d2:f8:7c:26:39:ce:5a:87:01.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-node2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-node2'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# ssh-copy-id k8s-master
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-master (192.168.10.50)' can't be established.
ECDSA key fingerprint is SHA256:BbQyv46crWLZgDlqpA5fjHnDrl5oJwOAHh9tX526l9w.
ECDSA key fingerprint is MD5:c1:55:d6:42:05:00:f5:49:78:fa:cd:b4:24:30:a6:a6.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-master's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-master'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# scp /etc/hosts k8s-node1:/etc/hosts
hosts                                                                                                                                                                          100%  234   126.8KB/s   00:00    
[root@k8s-master ~]# scp /etc/hosts k8s-node2:/etc/hosts
hosts                                                                                                                                                                          100%  234   106.1KB/s   00:00    
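
With passwordless login in place, repetitive per-node commands can be fanned out from the master. A minimal sketch, assuming the hostnames from /etc/hosts above:
for host in k8s-node1 k8s-node2; do
  ssh ${host} hostname   # replace hostname with any prep command
done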

  Disable Swap

swapoff -a
vim /etc/fstab
 
 
#
# /etc/fstab
# Created by anaconda on Sun Feb  7 10:14:45 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=ec65c557-715f-4f2b-beae-ec564c71b66b /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
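
Instead of editing /etc/fstab in vim, the swap line can be commented out with sed; a sketch assuming a swap entry like the one above:
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
free -m   # the Swap row should read 0 after swapoff -a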

  Load Kernel Modules and Set Kernel Parameters

modprobe br_netfilter
echo "modprobe br_netfilter" >> /etc/profile
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
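
Appending modprobe to /etc/profile only runs it when someone logs in; on systemd hosts a modules-load.d entry is the more conventional way to load br_netfilter at boot, e.g.:
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl net.bridge.bridge-nf-call-iptables   # should print 1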

  Load IPVS Modules

cd /etc/sysconfig/modules/
cat /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done
chmod +x ipvs.modules  
bash ipvs.modules
 
lsmod | grep ip_vs 
ip_vs_ftp              13079  0
nf_nat                 26787  1 ip_vs_ftp
ip_vs_sed              12519  0
ip_vs_nq               12516  0
ip_vs_sh               12688  0
ip_vs_dh               12688  0
ip_vs_lblcr            12922  0
ip_vs_lblc             12819  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs_wlc              12519  0
ip_vs_lc               12516  0
ip_vs                 145497  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          133095  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
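
/etc/sysconfig/modules/ is a CentOS 7 convention; the same modules can also be persisted through systemd, e.g. with a trimmed list covering what kube-proxy in ipvs mode actually needs:
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF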

  Install containerd

[root@k8s-all modules]# yum install containerd.io-1.6.6 -y
[root@k8s-all containerd]# containerd config default > /etc/containerd/config.toml 
Open /etc/containerd/config.toml and make three changes:
change SystemdCgroup = false to SystemdCgroup = true
change sandbox_image = "k8s.gcr.io/pause:3.6" to
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
find config_path = "" and change it to
config_path = "/etc/containerd/certs.d"
[root@k8s-all containerd]# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
[root@k8s-all containerd]# mkdir /etc/containerd/certs.d/docker.io/ -p
[root@k8s-all containerd]# vim /etc/containerd/certs.d/docker.io/hosts.toml
# write the following content (one [host."..."] section per mirror; TOML does
# not allow two hosts in a single section header):
[host."https://vh3bm52y.mirror.aliyuncs.com"]
  capabilities = ["pull"]
[host."https://registry.docker-cn.com"]
  capabilities = ["pull"]
[root@k8s-all containerd]# systemctl restart containerd
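
The three config.toml edits can be scripted instead of made in vim; a sketch that assumes the stock file generated by containerd config default above:
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sed -i 's#sandbox_image = "k8s.gcr.io/pause:3.6"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"#' /etc/containerd/config.toml
sed -i 's#config_path = ""#config_path = "/etc/containerd/certs.d"#' /etc/containerd/config.toml
systemctl enable --now containerd   # also enable containerd at boot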

  Configure a Docker Registry Mirror

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://g2aogmw8.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
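
To confirm the mirror is active:
docker info | grep -A1 'Registry Mirrors'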

  Configure the Kubernetes Yum Repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
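
With the repo in place, you can verify that the 1.25.0 packages are actually visible before installing:
yum list kubelet kubeadm kubectl --showduplicates | grep 1.25.0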

  Install Kubernetes

yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0 && systemctl enable kubelet
# point crictl at the containerd runtime endpoint
crictl config runtime-endpoint /run/containerd/containerd.sock
# initialize the k8s cluster with kubeadm
[root@k8s-master ~]#  kubeadm config print init-defaults > kubeadm.yaml
[root@k8s-master ~]# vim kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.50 # control-plane node IP
  bindPort: 6443 # API server port
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock # use the containerd runtime
  imagePullPolicy: IfNotPresent
  name: k8s-master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers # pull control-plane images from the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: 1.25.0
#controlPlaneEndpoint: 192.168.40.199:16443  # HA VIP and port
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 # pod CIDR
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs # kube-proxy mode
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd  # cgroup driver
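
Before running kubeadm init, the control-plane images can be pre-pulled so a slow registry doesn't make the init time out; kubeadm supports this directly:
kubeadm config images pull --config kubeadm.yaml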

  Initialize the Cluster and Join the Nodes

[root@k8s-master ~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.50:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:3965be8b67be6c841add842c788fc4879e2efbe23ad543b68889fef28570fea7 
[root@k8s-master ~]#   mkdir -p $HOME/.kube
[root@k8s-master ~]# 
[root@k8s-master ~]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# 
[root@k8s-master ~]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]# 
[root@k8s-master ~]#  export KUBECONFIG=/etc/kubernetes/admin.conf
Join the worker nodes to the cluster:
[root@k8s-node1 ~]# kubeadm join 192.168.10.50:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3965be8b67be6c841add842c788fc4879e2efbe23ad543b68889fef28570fea7 --ignore-preflight-errors=SystemVerification
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-node2 ~]# kubeadm join 192.168.10.50:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3965be8b67be6c841add842c788fc4879e2efbe23ad543b68889fef28570fea7 --ignore-preflight-errors=SystemVerification
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
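
The token above has a 24h TTL (see ttl in kubeadm.yaml), so if another node needs to join later, print a fresh join command on the master:
kubeadm token create --print-join-command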

  Check Cluster Status and Label the Nodes

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES           AGE     VERSION
k8s-master   NotReady   control-plane   7m57s   v1.25.0
k8s-node1    NotReady   <none>          6m53s   v1.25.0
k8s-node2    NotReady   <none>          6m11s   v1.25.0
[root@k8s-master ~]# kubectl label nodes k8s-node2 node-role.kubernetes.io/work=work
node/k8s-node2 labeled
[root@k8s-master ~]# kubectl label nodes k8s-node1 node-role.kubernetes.io/work=work
node/k8s-node1 labeled
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES           AGE     VERSION
k8s-master   NotReady   control-plane   10m     v1.25.0
k8s-node1    NotReady   work            9m5s    v1.25.0
k8s-node2    NotReady   work            8m23s   v1.25.0
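
All nodes stay NotReady until a CNI plugin is deployed; the coredns pods will sit in Pending in the meantime, which can be confirmed with:
kubectl get pods -n kube-system -o wide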

  Install Calico (calico.yaml)

The manifest can be downloaded from: https://docs.projectcalico.org/manifests/calico.yaml

[root@k8s-master ~]# kubectl apply -f calico.yaml 
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   20m   v1.25.0
k8s-node1    Ready    work            19m   v1.25.0
k8s-node2    Ready    work            19m   v1.25.0
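
If the nodes do not flip to Ready immediately, watch the Calico pods come up (the manifest labels its DaemonSet k8s-app=calico-node):
kubectl get pods -n kube-system -l k8s-app=calico-node -o wide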

  Test the Network

[root@k8s-master ~]# kubectl run busybox --image docker.io/library/busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it -- sh
If you don't see a command prompt, try pressing enter.
/ # ping baidu.com
PING baidu.com (110.242.68.66): 56 data bytes
64 bytes from 110.242.68.66: seq=2 ttl=127 time=558.154 ms
64 bytes from 110.242.68.66: seq=3 ttl=127 time=334.110 ms
64 bytes from 110.242.68.66: seq=4 ttl=127 time=598.778 ms
^C
--- baidu.com ping statistics ---
6 packets transmitted, 3 packets received, 50% packet loss
round-trip min/avg/max = 334.110/497.014/598.778 ms
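
Pinging an external host proves pod egress; cluster DNS is worth a separate check from the same busybox shell (busybox 1.28 ships an nslookup that works against kube-dns). With the serviceSubnet above, it should resolve via 10.96.0.10:
/ # nslookup kubernetes.default.svc.cluster.local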