Binary deployment of Kubernetes and cluster upgrade

#Binary deployment: using ansible to deploy a multi-master, highly available Kubernetes cluster

1.#Hostname planning
Type          Server IP        Hostname                   VIP
K8S Master1   192.168.7.101    k8s-master1.magedu.net     192.168.7.248
K8S Master2   192.168.7.102    k8s-master2.magedu.net     192.168.7.248
Harbor1       192.168.7.103    k8s-harbor1.magedu.net
Harbor2       192.168.7.104    k8s-harbor2.magedu.net
etcd node 1   192.168.7.105    k8s-etcd1.magedu.net
etcd node 2   192.168.7.106    k8s-etcd2.magedu.net
etcd node 3   192.168.7.107    k8s-etcd3.magedu.net
Haproxy1      192.168.7.108    k8s-ha1.magedu.net
Haproxy2      192.168.7.109    k8s-ha2.magedu.net
Node 1        192.168.7.110    k8s-node1.magedu.net
Node 2        192.168.7.111    k8s-node2.magedu.net

2.#Software list
Port: 192.168.7.248:6443  # must be configured on the load balancer as a reverse proxy; the dashboard port is 8443
OS: Ubuntu Server 18.04
k8s version: 1.13.5
calico: 3.4.4

3.#Basic environment preparation
http://releases.ubuntu.com/
Configure hostnames, IP addresses and system parameter tuning on every host, and deploy the load balancer and Harbor that the cluster depends on.

3.1#keepalived:
root@k8s-ha1:~# cat /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
 state MASTER
 interface eth0
 virtual_router_id 1
 priority 100
 advert_int 3
 unicast_src_ip 192.168.7.108
 unicast_peer {
 192.168.7.109
 }
 authentication {
 auth_type PASS
 auth_pass 123abc
 }
  virtual_ipaddress {
 192.168.7.248 dev eth0 label eth0:1
 }
}
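# Once keepalived is running on both HA nodes, a quick sanity check (a sketch; the interface name eth0 follows the config above) confirms the VIP is held by the MASTER node:
root@k8s-ha1:~# ip addr show eth0 | grep 192.168.7.248   # the VIP should appear with the eth0:1 label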

3.2#haproxy:
listen k8s_api_nodes_6443
 bind 192.168.7.248:6443
 mode tcp
 #balance leastconn
 server 192.168.7.101 192.168.7.101:6443 check inter 2000 fall 3 rise 5
 #server 192.168.100.202 192.168.100.202:6443 check inter 2000 fall 3 rise 5
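# A quick check after reloading haproxy (a sketch; note that binding the VIP on the node that does not currently hold it requires net.ipv4.ip_nonlocal_bind=1):
root@k8s-ha1:~# systemctl restart haproxy && ss -tnl | grep 6443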

3.3#Harbor with HTTPS:
Images used internally are kept on the internal Harbor servers instead of being pulled from the Internet every time.
root@k8s-harbor1:/usr/local/src/harbor# pwd
/usr/local/src/harbor
root@k8s-harbor1:/usr/local/src/harbor# mkdir certs/

# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key  # generate the private key
# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt  # issue the self-signed certificate
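# Optional sanity check on the generated certificate (a sketch):
# openssl x509 -in /usr/local/src/harbor/certs/harbor-ca.crt -noout -subject -dates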

# vim harbor.cfg
hostname = harbor.magedu.net
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key
harbor_admin_password = 123456

# ./install.sh

#Sync the crt certificate to the clients:
master1:~# mkdir /etc/docker/certs.d/harbor.magedu.net -p
harbor1:~# scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.7.101:/etc/docker/certs.d/harbor.magedu.net
master1:~# vim /etc/hosts  # add hosts-file name resolution
192.168.7.103 harbor.magedu.net
master1:~# systemctl restart docker  # restart docker

#Test logging in to harbor:
root@k8s-master1:~# docker login harbor.magedu.net
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded

#Test pushing an image to harbor:
master1:~# docker pull alpine
root@k8s-master1:~# docker tag alpine harbor.magedu.net/library/alpine:linux36
root@k8s-master1:~# docker push harbor.magedu.net/library/alpine:linux36
The push refers to repository [harbor.magedu.net/library/alpine]
256a7af3acb1: Pushed
linux36: digest: sha256:97a042bf09f1bf78c8cf3dcebef94614f2b95fa2f988a5c07314031bc2570c7a size: 528

4.#Deployment with ansible
4.1#Basic environment preparation:
# apt-get install python2.7
# ln -s /usr/bin/python2.7 /usr/bin/python
# apt-get install git ansible -y
# ssh-keygen  # generate the key pair
# apt-get install sshpass  # used to push the public key to every k8s server over ssh
#Key distribution script:
root@k8s-master1:~# cat scp.sh
#!/bin/bash
#target host list
IP="
192.168.7.101
192.168.7.102
192.168.7.103
192.168.7.104
192.168.7.105
192.168.7.106
192.168.7.107
192.168.7.108
192.168.7.109
192.168.7.110
192.168.7.111
"
for node in ${IP};do
  sshpass -p 123456 ssh-copy-id ${node} -o StrictHostKeyChecking=no
  if [ $? -eq 0 ];then
    echo "${node} key copy finished"
  else
    echo "${node} key copy failed"
  fi
done

#Script to sync the docker certificate:
#!/bin/bash
#target host list
IP="
192.168.7.101
192.168.7.102
192.168.7.103
192.168.7.104
192.168.7.105
192.168.7.106
192.168.7.107
192.168.7.108
192.168.7.109
192.168.7.110
192.168.7.111
"
for node in ${IP};do
  sshpass -p 123456 ssh-copy-id ${node} -o StrictHostKeyChecking=no
  if [ $? -eq 0 ];then
    echo "${node} key copy finished"
    echo "${node} key copy finished, starting environment initialization....."
    ssh ${node} "mkdir /etc/docker/certs.d/harbor.magedu.net -p"
    echo "Harbor certificate directory created!"
    scp /etc/docker/certs.d/harbor.magedu.net/harbor-ca.crt ${node}:/etc/docker/certs.d/harbor.magedu.net/harbor-ca.crt
    echo "Harbor certificate copied!"
    scp /etc/hosts ${node}:/etc/hosts
    echo "hosts file copied"
    scp -r /root/.docker ${node}:/root/
    echo "Harbor credential file copied!"
    scp -r /etc/resolv.conf ${node}:/etc/
  else
    echo "${node} key copy failed"
  fi
done

#Run the script to sync:
k8s-master1:~# bash scp.sh
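#Optional spot check that the keys and files were distributed (a sketch; any hosts from the list work):
root@k8s-master1:~# for ip in 192.168.7.105 192.168.7.110; do ssh -o BatchMode=yes ${ip} "hostname; ls /etc/docker/certs.d/harbor.magedu.net/"; done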

# vim ~/.vimrc  # disable vim auto-indent so pasted YAML keeps its formatting
set paste

4.2#Clone the project:
# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
root@k8s-master1:~# mv /etc/ansible/* /opt/
root@k8s-master1:~# mv kubeasz/* /etc/ansible/
root@k8s-master1:~# cd /etc/ansible/
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts  # copy the hosts template file

4.3#Prepare the hosts file:
root@k8s-master1:/etc/ansible# pwd
/etc/ansible
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
root@k8s-master1:/etc/ansible# cat hosts
# Cluster deploy node: usually the node that runs the ansible playbooks
# Variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed in the cluster
[deploy]
192.168.7.101 NTP_ENABLED=no
# For the etcd cluster provide NODE_NAME as below; note that the etcd cluster must have an odd number of nodes: 1, 3, 5, 7...

[etcd]
192.168.7.105 NODE_NAME=etcd1
192.168.7.106 NODE_NAME=etcd2
192.168.7.107 NODE_NAME=etcd3

[new-etcd] # reserved group for adding etcd nodes later
#192.168.7.x NODE_NAME=etcdx
[kube-master]
192.168.7.101


[new-master] # reserved group for adding master nodes later
#192.168.7.5

[kube-node]
192.168.7.110

[new-node] # reserved group for adding node nodes later
#192.168.7.xx

# Parameter NEW_INSTALL: yes installs a new harbor, no uses an existing harbor server
# If you do not use a domain name, set HARBOR_DOMAIN=""
[harbor]
#192.168.7.8 HARBOR_DOMAIN="harbor.yourdomain.com"
NEW_INSTALL=no

# Load balancers (more than 2 nodes are now supported; 2 are usually enough); installs haproxy+keepalived
[lb]
192.168.7.1 LB_ROLE=backup
192.168.7.2 LB_ROLE=master

# [Optional] external load balancer, used in self-hosted environments to forward traffic to services exposed via NodePort, etc.
[ex-lb]
#192.168.7.6 LB_ROLE=backup EX_VIP=192.168.7.250
#192.168.7.7 LB_ROLE=master EX_VIP=192.168.7.250

[all:vars]
# ---------Main cluster parameters---------------
# Cluster deployment mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master

# Cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"

# Cluster MASTER IP, i.e. the VIP on the LB nodes; the kubeasz example listens on 8443 to distinguish it from the default apiserver port, but this deployment keeps 6443 on the VIP (see the haproxy config above)
# On a public cloud, use the internal address and listening port of the cloud load balancer instead
MASTER_IP="192.168.7.248"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"

# Cluster network plugin; currently supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"

# Service CIDR; must not overlap with any existing internal network
SERVICE_CIDR="10.20.0.0/16"

# Pod CIDR (Cluster CIDR); must not overlap with any existing internal network
CLUSTER_CIDR="172.31.0.0/16"

# Service port range (NodePort Range)
NODE_PORT_RANGE="20000-60000"

# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"

# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"

# Cluster DNS domain
CLUSTER_DNS_DOMAIN="linux36.local."

# Username and password for the cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"

# ---------Additional parameters--------------------
# Default binary directory
bin_dir="/usr/bin"

# Certificate directory
ca_dir="/etc/kubernetes/ssl"

# Deployment directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"

4.4#Prepare the binaries:
k8s-master1:/etc/ansible/bin# pwd
/etc/ansible/bin
k8s-master1:/etc/ansible/bin# tar xvf k8s.1-13-5.tar.gz
k8s-master1:/etc/ansible/bin# mv bin/* .
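#Optional check that the unpacked binaries match the expected version (a sketch):
k8s-master1:/etc/ansible/bin# ./kube-apiserver --version   # should report v1.13.5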

4.5#Initialize the environment and deploy the HA k8s cluster with the ansible playbooks
4.5.1#Environment initialization
root@k8s-master1:/etc/ansible# pwd
/etc/ansible
root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml

4.5.2#Deploy the etcd cluster:
root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml
#Verify the etcd service on each etcd node:
root@k8s-etcd1:~# export NODE_IPS="192.168.7.105 192.168.7.106 192.168.7.107"
root@k8s-etcd1:~# for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 /usr/bin/etcdctl \
    --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/etcd/ssl/etcd.pem \
    --key=/etc/etcd/ssl/etcd-key.pem endpoint health
done
https://192.168.7.105:2379 is healthy: successfully committed proposal: took = 2.198515ms
https://192.168.7.106:2379 is healthy: successfully committed proposal: took = 2.457971ms
https://192.168.7.107:2379 is healthy: successfully committed proposal: took = 1.859514ms

4.5.3#Deploy docker:
#Optionally change the service unit file path; docker was installed in advance here, so this playbook does not have to be re-run
root@k8s-master1:/etc/ansible# ansible-playbook 03.docker.yml

4.5.4#Deploy the masters:
Optionally change the service unit file path
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml

4.5.5#Deploy the nodes:
Node nodes must have docker installed
root@k8s-master1:/etc/ansible# vim roles/kube-node/defaults/main.yml
# base (pause) container image
SANDBOX_IMAGE: "harbor.magedu.net/baseimages/pause-amd64:3.1"
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml

4.5.6#Deploy the network service (calico):
Optionally change the calico service unit file path and the CSR certificate information
# docker load -i calico-cni.tar
# docker tag calico/cni:v3.4.4 harbor.magedu.net/baseimages/cni:v3.4.4
# docker push harbor.magedu.net/baseimages/cni:v3.4.4
# docker load -i calico-node.tar
# docker tag calico/node:v3.4.4 harbor.magedu.net/baseimages/node:v3.4.4
# docker push harbor.magedu.net/baseimages/node:v3.4.4
# docker load -i calico-kube-controllers.tar
# docker tag calico/kube-controllers:v3.4.4 harbor.magedu.net/baseimages/kube-controllers:v3.4.4
# docker push harbor.magedu.net/baseimages/kube-controllers:v3.4.4
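#To make the playbook use the images pushed above, repoint the calico image references at the local harbor; a grep sketch for locating them (the path is an assumption about the kubeasz layout, adjust as needed):
root@k8s-master1:/etc/ansible# grep -rn "calico/" roles/calico/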

#Run the network deployment:
root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml

#Verify calico:
root@k8s-master1:/etc/ansible# calicoctl node status
Calico process is running.
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  | PEER TYPE         | STATE | SINCE    | INFO        |
+---------------+-------------------+-------+----------+-------------+
| 192.168.7.110 | node-to-node mesh | up    |14:22:44  | Established |
+---------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.

#kubectl run net-test1 --image=alpine --replicas=4 sleep 360000
#create test pods to verify that cross-host pod networking works
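#A sketch of the actual check (the pod name suffix is generated, substitute your own; pick a peer pod IP that lives on a different node):
# kubectl get pods -o wide          # note which node each net-test1 replica runs on and its pod IP
# kubectl exec -it net-test1-<pod-suffix> sh
# / # ping <pod IP on another node>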

4.5.7#Add a node:
[kube-node]
192.168.7.110

[new-node] # reserved group for adding node nodes later
192.168.7.111

root@k8s-master1:/etc/ansible# ansible-playbook 20.addnode.yml

4.5.8#Add a master:
Comment out the [lb] group first, otherwise the next step will fail
[kube-master]
192.168.7.101

[new-master] # reserved group for adding master nodes later
192.168.7.102

root@k8s-master1:/etc/ansible# ansible-playbook 21.addmaster.yml

4.5.9#Verify the current state:
root@k8s-master1:/etc/ansible# calicoctl node status
Calico process is running.
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  | PEER TYPE         | STATE | SINCE    | INFO        |
+---------------+-------------------+-------+----------+-------------+
| 192.168.7.110 | node-to-node mesh | up    |14:22:45  | Established |
| 192.168.7.111 | node-to-node mesh | up    |14:33:24  | Established |
| 192.168.7.102 | node-to-node mesh | up    |14:42:21  | Established |
+---------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.

root@k8s-master1:/etc/ansible# kubectl get nodes
NAME            STATUS                     ROLES    AGE   VERSION
192.168.7.101   Ready,SchedulingDisabled   master   37m   v1.13.5
192.168.7.102   Ready,SchedulingDisabled   master   41s   v1.13.5
192.168.7.110   Ready                      node     33m   v1.13.5
192.168.7.111   Ready                      node     15m   v1.13.5

5#k8s application environment:
5.1#dashboard (1.10.1)
Deploy the Kubernetes web management UI, dashboard

5.2#Steps:
5.2.1. Import the dashboard image and push it to the local harbor server
# tar xvf dashboard-yaml_image-1.10.1.tar.gz
./admin-user-sa-rbac.yaml
./kubernetes-dashboard-amd64-v1.10.1.tar.gz
./kubernetes-dashboard.yaml
./read-user-sa-rbac.yaml
./ui-admin-rbac.yaml
./ui-read-rbac.yaml

# docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
# docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.10.1 harbor.magedu.net/baseimages/kubernetes-dashboard-amd64:v1.10.1
# docker push harbor.magedu.net/baseimages/kubernetes-dashboard-amd64:v1.10.1

5.2.2. Change the dashboard image address in the yaml file to the local harbor address
image: harbor.magedu.net/baseimages/kubernetes-dashboard-amd64:v1.10.1

5.2.3. Create the services
# kubectl apply -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created

5.2.4. Verify the dashboard has started:
# kubectl get pods -n kube-system
NAME                                     READY STATUS  RESTARTS AGE
calico-kube-controllers-77d9f69cdd-rbml2 1/1   Running 0        25m
calico-node-rk6jk                        1/1   Running 1        20m
calico-node-vjn65                        1/1   Running 0        25m
calico-node-wmj48                        1/1   Running 0        25m
calico-node-wvsxb                        1/1   Running 0        5m14s
kubernetes-dashboard-cb96d4fd8-th6nw     1/1   Running 0        37s

# kubectl get svc -n kube-system
NAME                   TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.20.32.158   <none>        443:27775/TCP   79s

# kubectl cluster-info  # show cluster info
Kubernetes master is running at https://192.168.7.248:6443
kubernetes-dashboard is running at https://192.168.7.248:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
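#With the NodePort shown above, the dashboard can also be opened directly on any node IP (a sketch; use whatever port "kubectl get svc -n kube-system" reported for your deployment, 27775 here):
# https://192.168.7.110:27775   # log in with a token (next section) or a kubeconfig file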

5.3#Logging in to the dashboard with a token
# kubectl -n kube-system get secret | grep admin-user
# kubectl -n kube-system describe secret admin-user-token-2wm96
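#A one-line sketch that prints only the token (the secret name is whatever the grep above returned):
# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep '^token' | awk '{print $2}'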

5.4#Logging in with a Kubeconfig
Build a Kubeconfig file
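#A minimal sketch of one common approach (assumptions: the deploy node already has a working /root/.kube/config generated by kubeasz, and the token comes from the admin-user secret shown in 5.3):
# cp /root/.kube/config /opt/dashboard-kubeconfig
# kubectl -n kube-system describe secret admin-user-token-2wm96 | grep '^token' | awk '{print $2}'
#append the printed value as a "token: <value>" line under the user entry in /opt/dashboard-kubeconfig, then choose "Kubeconfig" on the dashboard login page and upload the file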

5.5#Switching kube-proxy from iptables to ipvs and the scheduling algorithm:
# IPVS mode was introduced as alpha in Kubernetes v1.8, became beta in v1.9, and reached GA (i.e. the release version) in v1.11. The IPTABLES mode was added in v1.1 and has been the default since v1.2. Both IPVS and IPTABLES are based on netfilter; the differences between the two modes are:
# IPVS offers better scalability and performance for large clusters. Compared with IPTABLES, IPVS supports more sophisticated load-balancing algorithms (least connections, round robin, weighted round robin, etc.).
# IPVS also supports server health checking, connection retries, and so on.

Configure kubernetes to use ipvs:
root@s6:~# vim /etc/systemd/system/kube-proxy.service
--proxy-mode=ipvs \
 --ipvs-scheduler=sh
 
Schedulers:
 rr: round-robin
 lc: least connection
 dh: destination hashing
 sh: source hashing
 sed: shortest expected delay
 nq: never queue
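#After editing the unit file, reload and restart kube-proxy on every node and confirm the rules (a sketch; ipvsadm may need to be installed first):
root@s6:~# apt-get install ipvsadm ipset -y
root@s6:~# systemctl daemon-reload && systemctl restart kube-proxy
root@s6:~# ipvsadm -Ln    # the service cluster IPs should now show up as IPVS virtual servers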
 
5.6#Set the token login session lifetime
# vim dashboard/kubernetes-dashboard.yaml
 image: harbor.magedu.net/baseimages/kubernetes-dashboard-amd64:v1.10.1
 ports:
 - containerPort: 8443
   protocol: TCP
 args:
   - --auto-generate-certificates
   - --token-ttl=43200
# kubectl apply -f .

5.7#Optional management UI: rancher
# curl --insecure -sfL https://172.31.7.101/v3/import/c8khcm67zhvxrhbx4ddpmmc88htxmkwb5b4f4fpttn2jlkfnwbscsx_c-wq7t7.yaml |kubectl apply -f -

5.8#Optional management UI: kuboard
https://kuboard.cn/overview/#kuboard%E5%9C%A8%E7%BA%BF%E4%BD%93%E9%AA%8C
Open http://your-host-ip:80 in a browser to reach the Kuboard v3.x UI; log in with:
Username: admin
Password: Kuboard123

6#DNS service:
#The two commonly used DNS components are kube-dns and coredns; both can still be used up to k8s 1.17.x. kube-dns and coredns resolve service names inside the k8s cluster to their corresponding IP addresses.

6.1#Deploy kube-dns:
kube-dns is no longer supported after k8s 1.18.

6.1.1.# skyDNS/kube-dns/coreDNS
kube-dns: resolves service-name domains
dns-dnsmasq: provides a DNS cache to reduce the load on kubedns and improve performance
dns-sidecar: periodically checks the health of kubedns and dnsmasq

6.1.2. Import the images and push them to the local harbor
# docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
# docker images
# docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 harbor.magedu.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13
# docker push harbor.magedu.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13
# docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
# docker images
# docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 harbor.magedu.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
# docker push harbor.magedu.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
# docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
# docker images
# docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 harbor.magedu.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13
# docker push harbor.magedu.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

6.1.3. Change the image addresses in the yaml file to the local harbor addresses
# vim kube-dns.yaml
  - name: kubedns
    image: harbor.magedu.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13

  - name: dnsmasq
    image: harbor.magedu.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

  - name: sidecar
    image: harbor.magedu.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
 
6.1.4. Create the service
# kubectl apply -f kube-dns.yaml

6.2#Deploy coredns:
https://github.com/coredns/coredns

#coredns 1.2/1.3/1.4/1.5 versions:
# docker tag gcr.io/google-containers/coredns:1.2.6 harbor.magedu.net/baseimages/coredns:1.2.6
# docker push harbor.magedu.net/baseimages/coredns:1.2.6

#Deployment method for version 1.6:
https://github.com/coredns/deployment/tree/master/kubernetes
# unzip deployment-master.zip
# ./deploy.sh 10.20.0.0/16 > magedu-coredns.yaml
# vim magedu-coredns.yaml  # edit the domain name

6.3 Name resolution tests:
# kubectl delete -f /etc/ansible/manifests/dns/kube-dns/kube-dns.yaml  # remove kube-dns
# kubectl apply -f coredns.yaml  # deploy coredns
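#The tests below assume a busybox test pod exists; a sketch for creating one (the image tag 1.28 is an assumption, chosen because nslookup is broken in newer busybox builds):
# kubectl run busybox --image=busybox:1.28 --restart=Never -- sleep 3600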
# kubectl exec busybox nslookup kubernetes
Server:    10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local

Name:      kubernetes
Address 1: 10.20.0.1 kubernetes.default.svc.linux36.local

# kubectl exec busybox nslookup kubernetes.default.svc.linux36.local
Server:    10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local

Name:      kubernetes.default.svc.linux36.local
Address 1: 10.20.0.1 kubernetes.default.svc.linux36.local

# kubectl exec busybox nslookup kube-dns.kube-system.svc.magedu.local
Server: 10.10.0.2
Address 1: 10.10.0.2 kube-dns.kube-system.svc.magedu.local
Name: kube-dns.kube-system.svc.magedu.local
Address 1: 10.10.0.2 kube-dns.kube-system.svc.magedu.local

Name: kube-dns.kube-system.svc.magedu.local
Address 1: 10.10.0.2 kube-dns.kube-system.svc.magedu.local

6.4 Monitoring components (heapster):
heapster: metrics collection
influxdb: metrics storage
grafana: web presentation
	1. Import the corresponding images
	2. Change the image addresses in the yaml files
	3. Create the services (a sketch follows this list)
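#A sketch of the three steps above, following the same pattern as the earlier components (file names and image versions are assumptions, adjust to the images you actually have):
# docker load -i heapster-amd64_v1.5.4.tar.gz
# docker tag gcr.io/google-containers/heapster-amd64:v1.5.4 harbor.magedu.net/baseimages/heapster-amd64:v1.5.4
# docker push harbor.magedu.net/baseimages/heapster-amd64:v1.5.4
#repeat for the influxdb and grafana images, point the image: lines in the heapster/influxdb/grafana yaml files at the harbor addresses, then:
# kubectl apply -f .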

7. Upgrading k8s
7.1. Download the new K8S release (the kubernetes-server binaries package)

7.2. Check the current version on the master
#kubectl version

7.3. Stop the k8s services on the masters
#systemctl stop kube-apiserver.service kube-controller-manager.service kubelet.service kube-proxy.service kube-scheduler.service

7.4. Stop the k8s services on the nodes
#systemctl stop kubelet.service kube-proxy.service

7.5. Replace the binaries on the ansible deploy node
mkdir /backup          # create a backup directory for the old binaries
cd /etc/ansible/bin/
mv kube-apiserver kube-controller-manager kubectl kubelet kube-proxy kube-scheduler /backup/

cd /tmp                # download the new release into this directory
wget https://storage.googleapis.com/kubernetes-release/release/v1.19.7/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin

# The new binaries normally already have execute permission; if not, add it manually
cp kube-apiserver kube-controller-manager kube-proxy kubectl kubelet kube-scheduler /etc/ansible/bin/

# kubeasz installs easzctl by default; it is the dedicated tool for version upgrades
easzctl upgrade        # start upgrading the masters and nodes

7.6. Verify on the masters and nodes that the new version is installed
# kubectl version
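#Each node should now report the new version (v1.19.7 in this example), e.g.:
# kubectl get nodes -o wide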