k8s cri-dockerd

Published 2023-03-23 17:08:05 Author: lzjasd
mkdir /data/rpm
mount -t nfs 192.168.14.134:/data/rpm /data/rpm -o nolock,nfsvers=3,vers=3
echo "mount -t nfs 192.168.14.134:/data/rpm /data/rpm -o nolock,nfsvers=3,vers=3" >> /etc/rc.local
chmod +x /etc/rc.local

cat > /root/base.repo << EOF
[base]
name=base
baseurl=file:///data/rpm
enabled=1
gpgcheck=0
EOF

rm -rf /etc/yum.repos.d/*
cp -r /root/base.repo /etc/yum.repos.d/

yum clean all
yum makecache fast
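
A quick sanity check that the NFS mount and the local repo are usable (a sketch; exact output varies):

df -h /data/rpm         # the NFS export should be mounted here
yum repolist enabled    # the "base" repo should list a nonzero package count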

cat >> /etc/hosts << EOF
192.168.14.132 master
192.168.14.133 node1
192.168.14.134 node2
192.168.14.135 node3
EOF

modprobe br_netfilter

echo "modprobe br_netfilter" >> /etc/profile

cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf

sudo sysctl --system
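
To confirm the settings took effect, they can be read back:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward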

yum install -y ipset ipvsadm
cat > /etc/modules-load.d/ipvs.conf << EOF
# Load IPVS at boot
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
# On kernels >= 4.19 this module is named nf_conntrack instead
nf_conntrack_ipv4
EOF

systemctl enable --now systemd-modules-load.service

lsmod | grep -e ip_vs -e nf_conntrack_ipv4

yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet

sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

sudo systemctl start docker

systemctl enable docker

systemctl status docker

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://19b12x6i.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl status docker
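
Since kubelet will be configured with the systemd cgroup driver below, it is worth confirming Docker actually picked the setting up:

docker info | grep -i 'cgroup driver'    # expect: Cgroup Driver: systemd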


tar -zxvf go1.20.2.linux-amd64.tar.gz -C /usr/local/

cat >> /etc/profile << 'EOF'
# Go environment variables
export GO111MODULE=on
export GOROOT=/usr/local/go
export GOPATH=/home/gopath
export PATH=$PATH:/usr/local/go/bin
EOF

source /etc/profile
go version

tar -zxvf cri.tar.gz
cd cri-dockerd
mkdir bin
go build -o bin/cri-dockerd
mkdir -p /usr/local/bin
install -o root -g root -m 0755 bin/cri-dockerd /usr/local/bin/cri-dockerd
cp -a packaging/systemd/* /etc/systemd/system
sed -i -e 's,/usr/bin/cri-dockerd,/usr/local/bin/cri-dockerd,' /etc/systemd/system/cri-docker.service
systemctl daemon-reload
systemctl enable cri-docker.service
systemctl enable --now cri-docker.socket
systemctl restart cri-docker.service
systemctl status cri-docker.service

ps -ef | grep dockerd

# Point cri-dockerd at the Aliyun pause image so kubeadm does not pull it from registry.k8s.io
sed -i "/.*ExecStart=*/c\ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://" /etc/systemd/system/cri-docker.service

systemctl daemon-reload
systemctl enable cri-docker.service
systemctl enable --now cri-docker.socket
systemctl restart cri-docker.service
systemctl status cri-docker.service
ps -ef | grep dockerd
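
For debugging through the CRI endpoint, crictl can be pointed at the cri-dockerd socket; a sketch, assuming crictl is installed separately:

cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///var/run/cri-dockerd.sock
image-endpoint: unix:///var/run/cri-dockerd.sock
EOF
crictl ps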


yum install -y kubelet-1.26.2-0 kubeadm-1.26.2-0 kubectl-1.26.2-0

cat << EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

systemctl enable kubelet && systemctl start kubelet
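
Until kubeadm init (or join) writes its configuration, kubelet restarts in a loop; this is normal and can be watched with:

journalctl -u kubelet -f    # errors are expected here until kubeadm has run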


kubeadm init --kubernetes-version=1.26.2 \
  --apiserver-advertise-address=192.168.14.132 \
  --image-repository registry.aliyuncs.com/google_containers \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket=unix:///var/run/cri-dockerd.sock

cat > /root/import.sh << 'EOF'
#!/bin/bash
# Load every pre-saved image archive under /data/img into Docker
list_img=$(ls /data/img)
cd /data/img
for tmp in $list_img; do
    docker load -i $tmp
done
EOF
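
The script can then be made executable and run on any host that has the image archives:

chmod +x /root/import.sh
/root/import.sh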

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

export KUBECONFIG=/etc/kubernetes/admin.conf

# Join command printed by kubeadm init; to be run on the worker nodes
kubeadm join 192.168.14.132:6443 --token wah770.26riwq1ujqvvcjuh \
    --discovery-token-ca-cert-hash sha256:28e0324bcc7a7b36aac7885b41595494f37a3d962951e2d234cc0201f37685fa


[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
coredns-5bbd96d687-r7wng         0/1     Pending   0          44m   <none>           <none>   <none>           <none>
coredns-5bbd96d687-w7w5d         0/1     Pending   0          44m   <none>           <none>   <none>           <none>
etcd-master                      1/1     Running   0          44m   192.168.14.132   master   <none>           <none>
kube-apiserver-master            1/1     Running   0          44m   192.168.14.132   master   <none>           <none>
kube-controller-manager-master   1/1     Running   0          44m   192.168.14.132   master   <none>           <none>
kube-proxy-9pq26                 1/1     Running   0          44m   192.168.14.132   master   <none>           <none>
kube-scheduler-master            1/1     Running   0          44m   192.168.14.132   master   <none>           <none>


kubectl describe pod coredns-5bbd96d687-lnsgj -n kube-system

Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  62s (x10 over 46m)  default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..

The coredns pods stay Pending because the node keeps its node.kubernetes.io/not-ready taint until a CNI network plugin (Calico, below) is installed.

docker images | grep dns
registry.aliyuncs.com/google_containers/coredns                   v1.9.3    5185b96f0bec   9 months ago    48.8MB

The cluster was then re-initialized with Calico's default pod CIDR, 192.168.0.0/16.
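
Re-running kubeadm init on an already-initialized node requires tearing the first attempt down; a minimal sketch, assuming the same cri-dockerd socket as above:

kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock
rm -rf $HOME/.kube/config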

kubeadm init --kubernetes-version=1.26.2 \
  --apiserver-advertise-address=192.168.14.132 \
  --image-repository registry.aliyuncs.com/google_containers \
  --pod-network-cidr=192.168.0.0/16 \
  --cri-socket=unix:///var/run/cri-dockerd.sock

kubeadm join 192.168.14.132:6443 --token 6q7ueo.1jt6zquc7hcr7ual \
    --discovery-token-ca-cert-hash sha256:46d4b6cf1c9295d97fdeffa60951f5d6f4f24c5835e7aa355e91ff2a241d9ca2


Install Calico

kubectl apply -f tigera-operator.yaml
kubectl apply -f custom-resources.yaml
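
The operator takes a few minutes to roll everything out; progress can be followed with:

watch kubectl get pods -n calico-system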


[root@master calico]# kubectl get pods --all-namespaces
NAMESPACE          NAME                                       READY   STATUS    RESTARTS   AGE
calico-apiserver   calico-apiserver-5d6c8bdfb5-kgshv          1/1     Running   0          2m28s
calico-apiserver   calico-apiserver-5d6c8bdfb5-mvtjj          1/1     Running   0          2m28s
calico-system      calico-kube-controllers-6b7b9c649d-ls7s5   1/1     Running   0          2m42s
calico-system      calico-node-2z4lw                          1/1     Running   0          2m42s
calico-system      calico-typha-85b77679db-cpk9b              1/1     Running   0          2m42s
calico-system      csi-node-driver-jlmsh                      2/2     Running   0          2m39s
kube-system        coredns-5bbd96d687-lnsgj                   1/1     Running   0          12m
kube-system        coredns-5bbd96d687-rqrq5                   1/1     Running   0          12m
kube-system        etcd-master                                1/1     Running   0          13m
kube-system        kube-apiserver-master                      1/1     Running   0          13m
kube-system        kube-controller-manager-master             1/1     Running   0          13m
kube-system        kube-proxy-xwwpn                           1/1     Running   0          12m
kube-system        kube-scheduler-master                      1/1     Running   0          13m
tigera-operator    tigera-operator-54b47459dd-bd6mk           1/1     Running   0          3m26s


Install the dashboard

Edit recommended.yaml, adding nodeName: master to the Deployment pod specs so the dashboard pods land on the master, then apply it:
kubectl apply -f recommended.yaml

kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
dashboard-metrics-scraper   ClusterIP   10.106.166.198   <none>        8000/TCP   2m35s
kubernetes-dashboard        ClusterIP   10.97.189.31     <none>        443/TCP    2m35s

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
# change spec.type from ClusterIP to:
type: NodePort
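
The same change can be made non-interactively:

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'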

kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.106.166.198   <none>        8000/TCP        4m13s
kubernetes-dashboard        NodePort    10.97.189.31     <none>        443:31468/TCP   4m13s

The UI is now reachable at https://192.168.14.132:31468

curl -I https://192.168.14.132:31468 -k

# Grant the dashboard service account cluster-admin (fine for a lab, far too broad for production)
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard

kubectl get secret -n kubernetes-dashboard

# Generate a login token for the dashboard
kubectl -n kubernetes-dashboard create token kubernetes-dashboard

eyJhbGciOiJSUzI1NiIsImtpZCI6ImpUV0ZXSDNodXEwaEVlZER0VDBvamVxSmlVdDY0QlNweDNBR1paNDZvNjgifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjc5NTYzOTY1LCJpYXQiOjE2Nzk1NjAzNjUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInVpZCI6IjVmNWJjMWEyLTBmOWMtNGZiMi05ZTRhLThlNmJkMjQ2YjYwMSJ9fSwibmJmIjoxNjc5NTYwMzY1LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQifQ.XDWobWH-E_aQsyUkLk25dU7Zzs1pSAXm-jk3QfyWA9b8AI-za3jGCy40GhdnekpT6gJUYfe9c_9FkTHGHJe20iXWwrIsrxTSiXKXRltJu6fSZQhc54vt6Iwmz1DSExmTAe9OPb4MIcFDW5-R9itQQrhL_zst6jMVrRoXuXDVIzAycJPeavqtPgP55ZBzHKX1w_0EhY6SIaYfO26Ss6gSBuLcBVrJSpVK6YwFNzQFldGOhDKqUWTlfkoxFOrhIOtVeE22KzJTV2GxQRAKb2vx06jwuWE1kR03j-YxadDGNOxtIklmVdGgVO8ZNLS6xaMNWF6Km1CpsBbs6ExMn3axVg


kubectl get pods --all-namespaces -o wide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
calico-apiserver       calico-apiserver-5d6c8bdfb5-kgshv            1/1     Running   0          14m     192.168.219.70   master   <none>           <none>
calico-apiserver       calico-apiserver-5d6c8bdfb5-mvtjj            1/1     Running   0          14m     192.168.219.69   master   <none>           <none>
calico-system          calico-kube-controllers-6b7b9c649d-ls7s5     1/1     Running   0          14m     192.168.219.66   master   <none>           <none>
calico-system          calico-node-2z4lw                            1/1     Running   0          14m     192.168.14.132   master   <none>           <none>
calico-system          calico-typha-85b77679db-cpk9b                1/1     Running   0          14m     192.168.14.132   master   <none>           <none>
calico-system          csi-node-driver-jlmsh                        2/2     Running   0          14m     192.168.219.65   master   <none>           <none>
kube-system            coredns-5bbd96d687-lnsgj                     1/1     Running   0          24m     192.168.219.67   master   <none>           <none>
kube-system            coredns-5bbd96d687-rqrq5                     1/1     Running   0          24m     192.168.219.68   master   <none>           <none>
kube-system            etcd-master                                  1/1     Running   0          24m     192.168.14.132   master   <none>           <none>
kube-system            kube-apiserver-master                        1/1     Running   0          25m     192.168.14.132   master   <none>           <none>
kube-system            kube-controller-manager-master               1/1     Running   0          25m     192.168.14.132   master   <none>           <none>
kube-system            kube-proxy-xwwpn                             1/1     Running   0          24m     192.168.14.132   master   <none>           <none>
kube-system            kube-scheduler-master                        1/1     Running   0          25m     192.168.14.132   master   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-5cc596bbc7-g244g   1/1     Running   0          7m11s   192.168.219.72   master   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-cf4b945fc-xmwpk         1/1     Running   0          7m11s   192.168.219.71   master   <none>           <none>
tigera-operator        tigera-operator-54b47459dd-bd6mk             1/1     Running   0          15m     192.168.14.132   master   <none>           <none>


Node registration

cat << EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

sudo kubeadm reset --cri-socket /var/run/cri-dockerd.sock

sudo systemctl enable docker

sudo systemctl enable kubelet

sudo systemctl daemon-reload

sudo systemctl restart docker
systemctl restart cri-docker.service

# Make sure no stale kubelet-related listeners (10250 etc.) are left behind
sudo netstat -lnp | grep 1025

sudo rm -rf /etc/kubernetes/kubelet.conf /etc/kubernetes/pki/ca.crt
pkill -9 kubelet

kubeadm join 192.168.14.132:6443 --token xu6hfw.jfykuy0st3k3r69u --discovery-token-ca-cert-hash sha256:46d4b6cf1c9295d97fdeffa60951f5d6f4f24c5835e7aa355e91ff2a241d9ca2 --cri-socket /var/run/cri-dockerd.sock
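
Back on the master, the joined workers should appear shortly:

kubectl get nodes -o wide    # nodes report Ready once Calico is running on them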


[root@master dashboard]# kubectl get pods --all-namespaces -o wide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP                NODE     NOMINATED NODE   READINESS GATES
calico-apiserver       calico-apiserver-5d6c8bdfb5-kgshv            1/1     Running   0          40m     192.168.219.70    master   <none>           <none>
calico-apiserver       calico-apiserver-5d6c8bdfb5-mvtjj            1/1     Running   0          40m     192.168.219.69    master   <none>           <none>
calico-system          calico-kube-controllers-6b7b9c649d-ls7s5     1/1     Running   0          40m     192.168.219.66    master   <none>           <none>
calico-system          calico-node-2z4lw                            1/1     Running   0          40m     192.168.14.132    master   <none>           <none>
calico-system          calico-node-586xg                            1/1     Running   0          8m44s   192.168.14.135    node3    <none>           <none>
calico-system          calico-node-n5z9h                            1/1     Running   0          2m17s   192.168.14.133    node1    <none>           <none>
calico-system          calico-typha-85b77679db-cpk9b                1/1     Running   0          40m     192.168.14.132    master   <none>           <none>
calico-system          calico-typha-85b77679db-wft27                1/1     Running   0          2m9s    192.168.14.135    node3    <none>           <none>
calico-system          csi-node-driver-hjj4j                        2/2     Running   0          2m12s   192.168.166.129   node1    <none>           <none>
calico-system          csi-node-driver-jlmsh                        2/2     Running   0          40m     192.168.219.65    master   <none>           <none>
calico-system          csi-node-driver-xgr8s                        2/2     Running   0          8m43s   192.168.135.1     node3    <none>           <none>
kube-system            coredns-5bbd96d687-lnsgj                     1/1     Running   0          51m     192.168.219.67    master   <none>           <none>
kube-system            coredns-5bbd96d687-rqrq5                     1/1     Running   0          51m     192.168.219.68    master   <none>           <none>
kube-system            etcd-master                                  1/1     Running   0          51m     192.168.14.132    master   <none>           <none>
kube-system            kube-apiserver-master                        1/1     Running   0          51m     192.168.14.132    master   <none>           <none>
kube-system            kube-controller-manager-master               1/1     Running   0          51m     192.168.14.132    master   <none>           <none>
kube-system            kube-proxy-8n6vw                             1/1     Running   0          8m44s   192.168.14.135    node3    <none>           <none>
kube-system            kube-proxy-p7758                             1/1     Running   0          2m17s   192.168.14.133    node1    <none>           <none>
kube-system            kube-proxy-xwwpn                             1/1     Running   0          51m     192.168.14.132    master   <none>           <none>
kube-system            kube-scheduler-master                        1/1     Running   0          51m     192.168.14.132    master   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-5cc596bbc7-g244g   1/1     Running   0          33m     192.168.219.72    master   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-cf4b945fc-xmwpk         1/1     Running   0          33m     192.168.219.71    master   <none>           <none>
tigera-operator        tigera-operator-54b47459dd-bd6mk             1/1     Running   0          41m     192.168.14.132    master   <none>           <none>