k8s

Published 2023-08-23 06:46:40  Author: 李八一


k8s basic environment setup

Host initialization
Upgrade the kernel on all hosts (optional)
# Update all packages via yum (this also upgrades the kernel)
[root@localhost ~]# yum update -y

# Import the ELRepo repository's public key
[root@localhost ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# Install the ELRepo yum repository
[root@localhost ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# List the available kernel packages
[root@localhost ~]# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available


# Install the latest mainline kernel
[root@localhost ~]# yum --enablerepo=elrepo-kernel install -y kernel-ml

# List all kernels available on the system
[root@localhost ~]# awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg

# Set the default kernel; 0 is the index of the desired kernel from the list above
[root@localhost ~]# grub2-set-default 0

# Regenerate the grub configuration
[root@localhost ~]# grub2-mkconfig -o /boot/grub2/grub.cfg

# Reboot
[root@localhost ~]# reboot

# Remove old kernels (optional)
# List all installed kernel packages
[root@localhost ~]# rpm -qa | grep kernel

# Remove the old kernel RPMs; the exact package names depend on the output of the command above
[root@localhost ~]# yum remove kernel-3.10.0-514.el7.x86_64 \
kernel-tools-libs-3.10.0-862.11.6.el7.x86_64 \
kernel-tools-3.10.0-862.11.6.el7.x86_64 \
kernel-3.10.0-862.11.6.el7.x86_64

hostnamectl set-hostname test-master01
bash
cat << EOF >> /etc/hosts
192.168.207.129 test-master01
192.168.207.130 test-node01
192.168.207.131 test-node02
192.168.207.132 test-node03
192.168.207.133 test-node04
EOF



hostnamectl set-hostname test-node01
bash
cat << EOF >> /etc/hosts
192.168.207.129 test-master01
192.168.207.130 test-node01
192.168.207.131 test-node02
EOF

hostnamectl set-hostname test-node02
bash
cat << EOF >> /etc/hosts
192.168.207.129 test-master01
192.168.207.130 test-node01
192.168.207.131 test-node02
EOF



# On all hosts: install base tools
yum -y install vim wget net-tools lrzsz


# Disable swap (required by kubelet)
swapoff -a
sed -i '/swap/s/^/#/' /etc/fstab

cat << EOF >> /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
modprobe br_netfilter
modprobe overlay
sysctl -p

# Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config


systemctl disable --now firewalld
systemctl disable --now dnsmasq
iptables -F
systemctl disable --now NetworkManager

yum -y install ntpdate
ntpdate time2.aliyun.com
hwclock --systohc

crontab -e
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com
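crontab -e is interactive; a non-interactive sketch that installs the same job (assuming no conflicting entries already exist) is:
(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -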

# Configure resource limits on all nodes
# Temporary (current session):
ulimit -SHn 65535
# Permanent: add the following lines to /etc/security/limits.conf
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited


# Set up SSH from the master to the other nodes
ssh-keygen -t rsa
for i in test-master01  test-node01 test-node02 ;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

# Load kernel modules on all hosts
tee /etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

systemctl enable --now systemd-modules-load.service
lsmod |grep -e ip_vs -e nf_conntrack


# Tune kernel parameters on all nodes
cat <<EOF> /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
user.max_user_namespaces=28633
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600 
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system

# Reboot all nodes, then verify the modules load at boot
reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack


# Configure persistent journald logging on all hosts
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d

cat >/etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Cap total disk usage at 10G
SystemMaxUse=10G
# Cap a single journal file at 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward to syslog
ForwardToSyslog=no
EOF


The docker rpm packages are available on Baidu Netdisk (百度网盘)
scp docker_rpm/* root@192.168.5.129:/root/docker_rpm
cd /root/docker_rpm && yum localinstall -y *.rpm

systemctl start docker
systemctl enable docker
      
      
cat << END > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://nyakyfun.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.10.250"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
END
systemctl daemon-reload
systemctl restart docker
      
      
# Install kubernetes packages (run on all hosts)
 cat <<EOF > /etc/yum.repos.d/kubernetes.repo 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9

 ls /etc/yum.repos.d/
 
 yum list kubeadm.x86_64 --showduplicates | sort -r
# List the kubeadm versions available in the repo

yum install -y kubelet-1.23.7 kubeadm-1.23.7 kubectl-1.23.7
systemctl start kubelet
 systemctl enable --now kubelet
 
 cd
 
 kubeadm config print init-defaults > init-config.yaml
# Generate a default init configuration file
 
 


 
vim init-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.200.111		# master node IP address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master		# if a domain name is used it must resolve; an IP address also works
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd		# local directory mounted into the etcd container
imageRepository: registry.aliyuncs.com/google_containers	# changed to a China-mainland mirror
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 	# added Pod subnet
scheduler: {}

# Set up the master node
 kubeadm config images list --config init-config.yaml
 kubeadm config images pull --config=init-config.yaml
 
 
 
# Offline install: load the locally prepared image tarballs
[root@k8s-master ~]# ls | while read line
> do
> docker load < $line
> done
 
 systemctl daemon-reload
systemctl restart docker
systemctl restart kubelet
 kubeadm reset
 
 # Initialize the cluster
  kubeadm init --config=init-config.yaml
  
mkdir -p $HOME/.kube
 cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

kubeadm join 192.168.5.128:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:fcebd32a020679d644a093337c59b3d651e52335840241efb4aafd36dd7a709f 
# Save the join command printed above to a file; the token generated by kubeadm init is only valid for 24 hours
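A hedged way to capture the current join command in a file (the path /root/kubeadm-join.sh is an assumption):
kubeadm token create --print-join-command | tee /root/kubeadm-join.sh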

[root@k8s-master01 ~]# kubeadm token create --ttl 0 --print-join-command
# Generate a token that never expires
[root@k8s-master01 ~]# kubeadm token list

kubeadm join 192.168.5.128:6443 --token 3h2obt.0c0t4zmyibx8u3fg \
        --discovery-token-ca-cert-hash sha256:fcebd32a020679d644a093337c59b3d651e52335840241efb4aafd36dd7a709f
        # The join command using the non-expiring token
        
        
 # Run on the node(s)
 systemctl enable --now kubelet
kubeadm join 192.168.5.128:6443 --token 3h2obt.0c0t4zmyibx8u3fg \
        --discovery-token-ca-cert-hash sha256:fcebd32a020679d644a093337c59b3d651e52335840241efb4aafd36dd7a709f
 # Copy the join command and run it on each node
 
 
 # Verify on the master
 kubectl get nodes
 
 # Install the network plugin (calico)
 mkdir calico
 cd calico/
kubectl apply -f calico.yaml
# Apply the calico.yaml network plugin manifest
kubectl get pod -A
# Check pod and node status
for i in `ls *.tar`;do docker load < $i ;done
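calico.yaml here is assumed to be prepared locally along with the *.tar image files; if fetching the manifest online instead, one common source (an assumption about where this copy came from) is:
curl -LO https://docs.projectcalico.org/manifests/calico.yaml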


# Node management commands
kubeadm reset
# Reset the master/node configuration

# Remove a node
kubectl delete node k8s-node04
docker rm -f $(docker ps -aq)
# Remove all containers
systemctl stop kubelet
# Stop kubelet
rm -rf /etc/kubernetes/*
rm -rf /var/lib/kubelet/*
systemctl status kubelet
# Check the status

# Possible error when deleting the directories
rm: cannot remove ‘/var/lib/kubelet/pods/85b9fae8-2eef-4ae2-82e3-7ee8a7654c62/volumes/kubernetes.io~secret/kube-proxy-token-z4ndh’: Device or resource busy
rm: cannot remove ‘/var/lib/kubelet/pods/db48c40d-9150-4422-9ee1-34d127f0322b/volumes/kubernetes.io~secret/calico-node-token-rv7vl’:
df -HT | grep '/var/lib/kubelet/pods'
# Filter the mount information

umount $(df -HT | grep '/var/lib/kubelet/pods' | awk '{print $7}')
# Unmount these mounts, then delete again
 df -HT | grep '/var/lib/kubelet/pods'
# No mounts remain, so deleting the directories again no longer errors



# Test
# Pull an nginx image on a node, then apply the yaml file on the master
[root@k8s-node1 ~]# docker pull nginx
[root@k8s-node1 ~]# docker images
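nginx-deployment.yaml is applied below but not shown in these notes; a minimal sketch of what such a file might look like (the replica count, labels, and image tag are assumptions):
cat << EOF > nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF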
[root@k8s-master01 ~]# kubectl apply -f nginx-deployment.yaml 
[root@k8s-master01 ~]# kubectl get deployment
# Check the deployment details
kubectl get pod
kubectl describe pod nginx-deployment-67dffbbbb-6xtkj
# Describe a specific pod
kubectl get pod -o wide
curl 10.244.169.129
================================================================
Log in to the master node and deploy an nginx for testing

# Deploy nginx
kubectl create deployment nginx  --image=nginx:1.18-alpine
# --image=nginx:1.18-alpine makes the node pull this image

# Expose the port
kubectl expose deployment nginx  --port=80  --type=NodePort

kubectl get pod,svc
The NodePort here is 32673; browsing to master:port, node1:port, or node2:port all reach the nginx welcome page
==========================================================================
kubectl get pod
# List pods
kubectl get pod -o wide
kubectl get po,no
# List nodes and pods together
kubectl describe po nginx-deployment-55b8b59cf6-5
# Describe this pod in detail
[root@k8s-master01 ~]# kubectl logs  nginx-deployment-55b8b59cf6-xxgjl
# View the logs of a single pod
kubectl delete -f nginx-deployment.yaml 
# Delete the resources created earlier
 systemctl status kubelet
 # Check kubelet status
 journalctl -xef
 # -f keeps following the log
 =============================================================
 [root@k8s-master01 ~]# kubectl expose deployment nginx-deployment --port=80 --target-port=80 --name=nginx-server --type=NodePort

 kubectl exec -it <pod-name> -- /bin/bash
 [root@k8s-master01 ~]# kubectl exec -it  nginx-deployment-55b8b59cf6-96gzr -- /bin/bash
# Now inside the pod
root@nginx-deployment-55b8b59cf6-96gzr:/# find /root -name "index.html"
root@nginx-deployment-55b8b59cf6-96gzr:/# cd /usr/share/nginx/html/
root@nginx-deployment-55b8b59cf6-96gzr:/usr/share/nginx/html# ls
echo 111 >> index.html
root@nginx-deployment-55b8b59cf6-hxj4v:/# echo 2222 >> /usr/share/nginx/html/index.html
curl 10.109.81.173:30076
=====================================================================

Deploying the Dashboard UI to manage k8s

rz  recommended.yaml

# Run on all nodes
docker pull kubernetesui/dashboard:v2.6.0
docker pull kubernetesui/metrics-scraper:v1.0.4
[root@k8s-master ~]# kubectl apply -f recommended.yaml
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard
[root@k8s-master01 ~]# kubectl get svc -n kubernetes-dashboard
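recommended.yaml above is uploaded with rz; a hedged sketch of fetching it and exposing the UI on NodePort 32443 (the URL is the upstream v2.6.0 manifest, and the patch values are assumptions; the stock manifest ships a ClusterIP Service on port 443):
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.0/aio/deploy/recommended.yaml
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"nodePort":32443}]}}'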

kubectl get pods -A  -o wide
Test access with Chrome: https://192.168.200.111:32443

[root@k8s-master ~]# kubectl describe secret -n kubernetes-dashboard $(kubectl get secret -n kubernetes-dashboard |grep kubernetes-dashboard-token | awk '{print $1}') |grep token | awk '{print $2}'
# Copy this token into the dashboard login page
eyJhbGciOiJSUzI1NiIsImtpZCI6ImxsWGJ1RkRaWjh3SmljMVVRRks3N2RkLU1ReG83UTFJOUstckpiOVplNHMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1rc3dmNSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6Ijg0ZmYyOGQ1LTYwNmUtNGEwMy1iODY1LWI1YTlhOWJkNDViYSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.B1t54JSyDn0yd1feai3Je6sWgCCG8CmbTIXuBJKtjLLI6CnEjRgDsVmBvbNw2KoHM44ZXzZgtnXs-icTQ4ch4UFw4CUOjhdtvzxcCkky9_Ns7yNxZw68WQUD2kF5eFhQsPz9yfJhYs423sQs5rKjxohmBJIuWaa5JTSTGLCdtZQjeJGyZSxc6psfRtdoIHWTRVMrvPQZiEI0dE2UoCv4iAw33IB5hY427U2yZ88jROr9myLbbH7_HrPuaANK_C1ZFHTm1siRhiz2O3xQ7MxXg9yjHiobIYfRwgdJsQ71zPPWZ3IKJGAaCdxCRoXKkqALmVyz8mEj_zZAAXu4rz0yhg

# Resource monitoring (metrics-server); run the image pulls on the node(s)
[root@k8s-node01 ~]# docker pull bluersw/metrics-server-amd64:v0.3.6
[root@k8s-node01 ~]# docker tag bluersw/metrics-server-amd64:v0.3.6 k8s.gcr.io/metrics-server-amd64:v0.3.6

[root@k8s-master ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
# add the following flag to the kube-apiserver command (around line 44):
    - --enable-aggregator-routing=true

rz components.yaml
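components.yaml is also uploaded with rz here; if downloading instead, the v0.3.6 manifest is presumably available from the metrics-server release page (the URL below is an assumption):
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml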
 kubectl create -f components.yaml
 
 
 kubectl top nodes
 
 
 cd /etc/kubernetes/pki

kubectl config set-cluster kubernetes --certificate-authority=./ca.crt --server="https://192.168.207.129:32443" --embed-certs=true --kubeconfig=/root/dashboard-admin.conf

TOKEN=$(kubectl get secret $(kubectl get secret -n kubernetes-dashboard|grep kubernetes-dashboard-token*|awk '{print $1}') -n kubernetes-dashboard  -o jsonpath={.data.token}|base64 -d)

kubectl config set-credentials dashboard-admin --token=$TOKEN --kubeconfig=/root/dashboard-admin.conf
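The kubeconfig built above only has a cluster and a user; a hedged completion adds a context and selects it so /root/dashboard-admin.conf is usable on its own (the context name is an assumption):
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/root/dashboard-admin.conf
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/root/dashboard-admin.conf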


 

Helm package management

wget https://get.helm.sh/helm-v3.5.2-linux-amd64.tar.gz

tar xf helm-v3.5.2-linux-amd64.tar.gz 

[root@k8s-master01 ~]# cd linux-amd64/
[root@k8s-master01 linux-amd64]# ls
helm  LICENSE  README.md
[root@k8s-master01 linux-amd64]# mv helm /usr/bin/
helm
# Verify that it works

# Add a chart repository
helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
 helm repo list
 # List repositories

helm repo update
# Update the repository index

helm repo remove aliyun
# Remove the aliyun repository

 helm create nginx
cd nginx
ls

# Chart directory layout
[root@k8s-master01 ~]# tree nginx/
nginx/
├── charts					# charts this chart depends on
├── Chart.yaml				# chart metadata: name, version, description, etc.
├── templates				# directory of k8s manifest templates
│   ├── deployment.yaml		# template for the k8s Deployment resource
│   ├── _helpers.tpl		# files starting with _ can be referenced by other templates
│   ├── hpa.yaml			# HorizontalPodAutoscaler (CPU/memory based scaling)
│   ├── ingress.yaml		# ingress config for domain-based access to the Service
│   ├── NOTES.txt			# notes shown to the user after helm install
│   ├── serviceaccount.yaml
│   ├── service.yaml		# template for the kubernetes Service
│   └── tests
│       └── test-connection.yaml
└── values.yaml				# variables consumed by the template files


vim values.yaml
# set the image tag (around line 11) and the service type:
image:
  tag: "1.24"
service:
  type: NodePort

[root@k8s-master01 nginx]# helm install -f values.yaml nginx .

[root@k8s-master01 nginx]# kubectl get deploy
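A quick check of the release and its NodePort (the service name nginx assumes the chart's default fullname for a release named nginx):
helm list
kubectl get svc nginx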

# Deploy a Tomcat application with a chart
[root@k8s-master01 ~]# helm create tomcat
Creating tomcat
[root@k8s-master01 ~]# cd tomcat/

# After editing the yaml files, upgrade the release to re-apply them (see the sketch below)
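A sketch of that upgrade step (assuming the release was installed with the name tomcat from this chart directory):
helm upgrade -f values.yaml tomcat .
helm history tomcat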

Troubleshooting notes

Error message:
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR Port-6443]: Port 6443 is in use
        [ERROR Port-10251]: Port 10251 is in use
        [ERROR Port-10252]: Port 10252 is in use
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
...

[root@test2 ~]# kubeadm reset     # reset kubeadm
# Reset kubeadm, then run the initialization again

======================================================================================
# After checking the logs:
Jul 15 16:43:16 k8s-master01 systemd[1]: kubelet.service: main process exited, code=exited, status=1/FA
Jul 15 16:43:16 k8s-master01 systemd[1]: Unit kubelet.service entered failed state.
Jul 15 16:43:16 k8s-master01 systemd[1]: kubelet.service failed.
# Caused by the docker cgroup driver differing from the kubelet's

 docker info|grep Driver
 # Check the docker cgroup driver
 systemctl show --property=Environment kubelet |cat
# Check the kubelet cgroup driver

There are two fixes.
Fix 1: change the kubelet cgroup driver
cat>/etc/sysconfig/kubelet<<EOF
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF
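After changing the kubelet sysconfig, the service presumably needs to be restarted for the new cgroup driver to take effect (a sketch mirroring the restart sequence used elsewhere in these notes):
systemctl daemon-reload
systemctl restart kubelet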



Fix 2: change the docker driver. Check /etc/docker/daemon.json; if it does not exist, create it manually and add the following:
 
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
systemctl daemon-reload
systemctl restart docker
systemctl restart kubelet
 kubeadm reset
 
=======================================================================
[init] Using Kubernetes version: v1.23.0
[preflight] Running pre-flight checks
        [WARNING Hostname]: hostname "node" could not be reached
        [WARNING Hostname]: hostname "node": lookup node on 192.168.5.2:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
# In this case, just delete the leftover files
=============================================================================

Basic startup troubleshooting

# List the kube-related services
systemctl list-unit-files |grep kube

# Check each service's status
systemctl status kubelet


# Check each service's logs
journalctl -xefu kubelet

Mar 30 09:23:38 k8s-master1 kubelet[10310]: F0330 09:23:38.202074   10310 server.go:262] failed to run Kubelet: failed to create kubelet: failed to get docker version: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
Mar 30 09:23:38 k8s-master1 systemd[1]: kubelet.service: main process exited, code=exited, status=255/n/a
Mar 30 09:23:38 k8s-master1 systemd[1]: Unit kubelet.service entered failed state.
Mar 30 09:23:38 k8s-master1 systemd[1]: kubelet.service failed.

The messages above show that docker is not running.

Restart docker
systemctl daemon-reload
systemctl restart docker
systemctl enable docker

# Restart the kubelet service
systemctl restart kubelet.service

kubesphere

Common commands

docker images
# List all local images
docker rmi <image ID>
# Delete an image
docker save -o nginx.tar nginx:latest
docker save 1234567890ab -o kube-apiserver.tar
# Save images to local tar files
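A matching load command for the target host (a sketch based on the save commands above):
docker load -i nginx.tar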
yumdownloader --destdir=<save path> <package name>
# Download a package as an rpm without installing it

systemctl status kubelet
# Check kubelet (cluster) startup status
journalctl -xeu kubelet
# View kubelet logs


kubectl create secret docker-registry regcred \
  --docker-server=192.168.30.224 \
  --docker-username=Lijianhao  \
  --docker-password=Admin.123 \
  --docker-email=lijianhao@cellprobio.com 
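A minimal sketch of how the regcred secret would be referenced so the kubelet can pull from the private registry (the pod name and image path are assumptions):
cat << EOF > private-image-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-image-test
spec:
  containers:
  - name: app
    image: 192.168.30.224/library/cpcrm:6.0_api    # assumed image path on the private registry
  imagePullSecrets:
  - name: regcred
EOF
kubectl apply -f private-image-pod.yaml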
  
  
  [root@k8s-master BaseAuth]# kubectl delete secret  basic-auth
secret "basic-auth" deleted
[root@k8s-master BaseAuth]# 



takewiki/cpcrm:6.0_api
takewiki/cpcrmdb:6.0_api
takewiki/crmapi3000:v6.0


apiVersion: apps/v1
kind: Deployment
metadata:
  name: busybox-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: busybox
  template:
    metadata:
      labels:
        app: busybox
    spec:
      containers:
        - name: busybox
          image: busybox:1.28.4
          command:
          - "/bin/sh"
          - "-c"
          - "sleep 3600"