Ceph (8): Implementing Kubernetes data persistence

Published 2023-09-27 07:51:29 · Author: areke

1. Data persistence based on Ceph block storage (RBD)

To let pods in k8s use an RBD image from Ceph as a storage device, the RBD pool and image must be created in Ceph, and the k8s nodes must be able to pass Ceph authentication.

When k8s uses Ceph for dynamic volume provisioning, the kube-controller-manager component needs access to Ceph as well, so the authentication files have to be synchronized to every node, including the k8s masters and workers.

1.1 Create the RBD pool and initialize it

# Create the RBD pool
cephadmin@ceph-deploy:~$ ceph osd pool create k8s-rbd-pool 32 32
pool 'k8s-rbd-pool' created

# Verify the pool list
cephadmin@ceph-deploy:~$ ceph osd pool ls
device_health_metrics
mypool
myrbd1
rbd-data1
cephfs-metadata
cephfs-data
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
default.rgw.buckets.index
default.rgw.buckets.data
test-ssd-pool
default-pool
k8s-rbd-pool		# the newly created pool

# Enable the rbd application on the pool
cephadmin@ceph-deploy:~$ ceph osd pool application enable k8s-rbd-pool rbd
enabled application 'rbd' on pool 'k8s-rbd-pool'

# Initialize the pool for RBD
cephadmin@ceph-deploy:~$ rbd pool init -p k8s-rbd-pool
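
To double-check that the application tag took effect (optional; standard Ceph CLI, expected to return {"rbd": {}}):

ceph osd pool application get k8s-rbd-pool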

1.2 Create an image

# Create the image; only the layering feature is enabled so kernel RBD clients can map it
cephadmin@ceph-deploy:~$ rbd create k8s-img --size 3G --pool k8s-rbd-pool --image-feature layering
cephadmin@ceph-deploy:~$ rbd ls --pool k8s-rbd-pool
k8s-img

# Inspect the image
cephadmin@ceph-deploy:~$ rbd --image k8s-img --pool k8s-rbd-pool info
rbd image 'k8s-img':
	size 3 GiB in 768 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 106e5c2688c77
	block_name_prefix: rbd_data.106e5c2688c77
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Wed Sep 27 03:34:17 2023
	access_timestamp: Wed Sep 27 03:34:17 2023
	modify_timestamp: Wed Sep 27 03:34:17 2023

1.3 Install ceph-common on the k8s cluster

Install the ceph-common package on every k8s master and worker node.

# Configure the Tsinghua Ceph mirror on each master and worker node
apt install -y apt-transport-https ca-certificates curl software-properties-common
wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | apt-key add -

echo 'deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific/ focal main' >> /etc/apt/sources.list

# Refresh the package index
apt update

# List the available ceph-common versions
apt-cache madison ceph-common

# Install the same ceph-common version as the Ceph cluster
apt install ceph-common=16.2.14-1focal -y
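
After installing, it is worth confirming that the client version matches the cluster:

# The reported client version should match the cluster (16.2.14 Pacific here)
ceph --version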

1.4 Create and authorize a Ceph user

cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth get-or-create client.k8s mon 'allow r' osd 'allow * pool=k8s-rbd-pool'
[client.k8s]
	key = AQBNOBNlrdzbHhAAK/lp8aAqvUEEG5VArlOCzQ==
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth get client.k8s
[client.k8s]
	key = AQBNOBNlrdzbHhAAK/lp8aAqvUEEG5VArlOCzQ==
	caps mon = "allow r"
	caps osd = "allow * pool=k8s-rbd-pool"
exported keyring for client.k8s
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth get client.k8s -o ceph.client.k8s.keyring
exported keyring for client.k8s
cephadmin@ceph-deploy:/data/ceph-cluster$ cat ceph.client.k8s.keyring 
[client.k8s]
	key = AQBNOBNlrdzbHhAAK/lp8aAqvUEEG5VArlOCzQ==
	caps mon = "allow r"
	caps osd = "allow * pool=k8s-rbd-pool"



cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.11:/etc/ceph/
cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.12:/etc/ceph/
cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.13:/etc/ceph/
cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.41:/etc/ceph/
cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.42:/etc/ceph/
cephadmin@ceph-deploy:/data/ceph-cluster$ sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@10.0.0.43:/etc/ceph/
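
The same distribution can be written as a loop (a sketch; node list and password as above):

for ip in 10.0.0.11 10.0.0.12 10.0.0.13 10.0.0.41 10.0.0.42 10.0.0.43; do
  sshpass -p '123456' scp -o "StrictHostKeyChecking=no" ceph.conf ceph.client.k8s.keyring root@${ip}:/etc/ceph/
done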

Verify the user's permissions from a k8s node:

[root@k8s-master2 ~]#ceph -s --user=k8s
  cluster:
    id:     28820ae5-8747-4c53-827b-219361781ada
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 3d)
    mgr: ceph-mgr1(active, since 91m), standbys: ceph-mgr2
    mds: 2/2 daemons up, 2 standby
    osd: 24 osds: 24 up (since 7h), 24 in (since 7h)
    rgw: 2 daemons active (2 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   15 pools, 481 pgs
    objects: 461 objects, 269 MiB
    usage:   8.0 GiB used, 24 TiB / 24 TiB avail
    pgs:     481 active+clean

Verify access to the image:

[root@k8s-master2 ~]#rbd --user=k8s ls --pool=k8s-rbd-pool
k8s-img

1.5 Configure /etc/hosts name resolution on the k8s nodes

The ceph.conf configuration refers to the Ceph cluster's hosts by hostname, so hostname resolution must be configured on every k8s master and worker node:

cat >> /etc/hosts <<EOF
10.0.0.50 ceph-deploy
10.0.0.51 ceph-mon1
10.0.0.52 ceph-mon2
10.0.0.53 ceph-mon3
10.0.0.54 ceph-mgr1
10.0.0.55 ceph-mgr2
10.0.0.56 ceph-node1
10.0.0.57 ceph-node2
10.0.0.58 ceph-node3
10.0.0.59 ceph-node4
EOF
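
Resolution of the monitor hostnames can then be verified, for example:

# Should print the IPs configured above
getent hosts ceph-mon1 ceph-mon2 ceph-mon3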

1.6 Mounting RBD via a keyring file

Storage volumes backed by Ceph RBD can be provided to pods in two ways: one is to mount the RBD using a keyring file present on the host; the other is to store the key from the keyring as a k8s secret and have the pod mount the RBD through that secret.

1.6.1 Direct mount via keyring file: busybox

  1. Write the YAML file
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always 
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
    - name: rbd-data1
      rbd:
        monitors:
        - '10.0.0.51:6789'
        - '10.0.0.52:6789'
        - '10.0.0.53:6789'
        pool: k8s-rbd-pool
        image: k8s-img
        fsType: xfs
        readOnly: false
        user: k8s
        keyring: /etc/ceph/ceph.client.k8s.keyring
  2. Create the pod
# Apply the manifest
[root@k8s-master1 ceph-case]#kubectl apply -f case1-busybox-keyring.yaml 
pod/busybox created

# Check the pod
[root@k8s-master1 ceph-case]#kubectl get pod
NAME                                 READY   STATUS    RESTARTS       AGE
busybox                              1/1     Running   0              21s
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (114m ago)   12d
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (125m ago)   14d
  3. Enter the pod and verify the mount
[root@k8s-master1 ceph-case]#kubectl exec -it busybox sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # 
/ # df -Th
Filesystem           Type            Size      Used Available Use% Mounted on
overlay              overlay        77.0G     11.9G     65.1G  15% /
tmpfs                tmpfs          64.0M         0     64.0M   0% /dev
tmpfs                tmpfs           1.9G         0      1.9G   0% /sys/fs/cgroup
/dev/rbd0            xfs             3.0G     53.9M      2.9G   2% /data			# mounted successfully
/dev/sda4            xfs            77.0G     11.9G     65.1G  15% /etc/hosts
/dev/sda4            xfs            77.0G     11.9G     65.1G  15% /dev/termination-log
/dev/sda4            xfs            77.0G     11.9G     65.1G  15% /etc/hostname
/dev/sda4            xfs            77.0G     11.9G     65.1G  15% /etc/resolv.conf
shm                  tmpfs          64.0M         0     64.0M   0% /dev/shm
tmpfs                tmpfs           3.5G     12.0K      3.5G   0% /var/run/secrets/kubernetes.io/serviceaccount
tmpfs                tmpfs           1.9G         0      1.9G   0% /proc/acpi
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/kcore
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/keys
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/timer_list
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                tmpfs           1.9G         0      1.9G   0% /proc/scsi
tmpfs                tmpfs           1.9G         0      1.9G   0% /sys/firmware
/ # 
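
A quick write/read test confirms the volume is writable (the file name is arbitrary):

/ # echo "rbd write test" > /data/test.txt
/ # cat /data/test.txt
rbd write test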

1.6.2 Direct mount via keyring file: nginx

  1. Write the YAML file
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '10.0.0.51:6789'
            - '10.0.0.52:6789'
            - '10.0.0.53:6789'
            pool: k8s-rbd-pool
            image: k8s-img
            fsType: xfs
            readOnly: false
            user: k8s
            keyring: /etc/ceph/ceph.client.k8s.keyring
  2. Create the pod
# Delete the busybox pod (and its mount) first
[root@k8s-master1 ceph-case]#kubectl delete -f case1-busybox-keyring.yaml

# Apply the manifest
[root@k8s-master1 ceph-case]#kubectl apply -f case2-nginx-keyring.yaml 
deployment.apps/nginx-deployment created

# Check the pod
[root@k8s-master1 ceph-case]#kubectl get pod
NAME                                 READY   STATUS    RESTARTS       AGE
nginx-deployment-774577467c-x6kh7    1/1     Running   0              3m41s
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (128m ago)   12d
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (139m ago)   14d
  3. Enter the pod and verify the mount
# Enter the pod
[root@k8s-master1 ceph-case]#kubectl exec -it nginx-deployment-774577467c-x6kh7 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@nginx-deployment-774577467c-x6kh7:/# df -Th
Filesystem     Type     Size  Used Avail Use% Mounted on
overlay        overlay   17G   11G  6.7G  62% /
tmpfs          tmpfs     64M     0   64M   0% /dev
tmpfs          tmpfs    971M     0  971M   0% /sys/fs/cgroup
/dev/rbd0      xfs      3.0G   54M  3.0G   2% /data				# mounted successfully
/dev/sda4      xfs       17G   11G  6.7G  62% /etc/hosts
shm            tmpfs     64M     0   64M   0% /dev/shm
tmpfs          tmpfs    1.7G   12K  1.7G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs          tmpfs    971M     0  971M   0% /proc/acpi
tmpfs          tmpfs    971M     0  971M   0% /proc/scsi
tmpfs          tmpfs    971M     0  971M   0% /sys/firmware

1.6.3 Verify the RBD mapping on the host

Inside the pod the RBD appears to be mounted by the pod, but because pods use the host's kernel, the device is actually mapped and mounted on the host.

  1. Find the host the pod is running on

The pod is running on host master2 (10.0.0.12):

[root@k8s-master1 ceph-case]#kubectl get pod -owide
NAME                                 READY   STATUS    RESTARTS       AGE    IP               NODE        NOMINATED NODE   READINESS GATES
nginx-deployment-774577467c-x6kh7    1/1     Running   0              7m1s   10.200.224.4     10.0.0.12   <none>           <none>
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (131m ago)   12d    10.200.224.3     10.0.0.12   <none>           <none>
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (143m ago)   14d    10.200.159.134   10.0.0.11   <none>           <none>
  2. Verify the RBD mapping on that host
[root@k8s-master2 ~]#rbd showmapped
id  pool          namespace  image    snap  device   
0   k8s-rbd-pool             k8s-img  -     /dev/rbd0

[root@k8s-master2 ~]#df -Th
Filesystem     Type      Size  Used Avail Use% Mounted on
udev           devtmpfs  925M     0  925M   0% /dev
tmpfs          tmpfs     195M  2.1M  192M   2% /run
/dev/sda4      xfs        17G   11G  6.7G  62% /
shm            tmpfs      64M     0   64M   0% /run/containerd/io.containerd.grpc.v1.cri/sandboxes/13ad5b7d64ab5438ac8a99c1d0384af0c0cb5f268a34097269eb4410b1c102d5/shm
overlay        overlay    17G   11G  6.7G  62% /run/containerd/io.containerd.runtime.v2.task/k8s.io/13ad5b7d64ab5438ac8a99c1d0384af0c0cb5f268a34097269eb4410b1c102d5/rootfs
tmpfs          tmpfs     195M     0  195M   0% /run/user/0
tmpfs          tmpfs     1.7G   12K  1.7G   1% /var/lib/kubelet/pods/57106dd4-eb7c-4acb-a1a3-032197071f62/volumes/kubernetes.io~projected/kube-api-access-nwnzr
...
/dev/rbd0      xfs       3.0G   54M  3.0G   2% /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/k8s-rbd-pool-image-k8s-img			# the RBD mount

1.7 Mounting RBD via a secret

Define the key as a secret and mount that into the pod; then none of the k8s nodes needs to keep a keyring file.

1.7.1 Create a secret for the regular user

First create the secret. Its essential content is the key from the authorized Ceph keyring file; once the key value has been base64-encoded, the secret can be created.

  1. Base64-encode the key
# Show the key
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth print-key client.k8s
AQBNOBNlrdzbHhAAK/lp8aAqvUEEG5VArlOCzQ==

# Base64-encode it
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth print-key client.k8s | base64
QVFCTk9CTmxyZHpiSGhBQUsvbHA4YUFxdlVFRUc1VkFybE9DelE9PQ==
  2. Write the secret YAML file
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-k8s
type: "kubernetes.io/rbd"
data:
  key: QVFCTk9CTmxyZHpiSGhBQUsvbHA4YUFxdlVFRUc1VkFybE9DelE9PQ==
  3. Create the secret
[root@k8s-master1 ceph-case]#kubectl apply -f case3-secret-client-k8s.yaml 
secret/ceph-secret-k8s created

# Verify the secret
[root@k8s-master1 ceph-case]#kubectl get secret
NAME              TYPE                DATA   AGE
ceph-secret-k8s   kubernetes.io/rbd   1      6s
mysecret          Opaque              2      16d
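
Alternatively, the same secret can be created in one step with kubectl, which does the base64 encoding itself, so the raw key is passed rather than the encoded value:

kubectl create secret generic ceph-secret-k8s \
  --type="kubernetes.io/rbd" \
  --from-literal=key='AQBNOBNlrdzbHhAAK/lp8aAqvUEEG5VArlOCzQ=='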

1.7.2 Create the pod

Write the YAML file:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: 
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: rbd-data1
          mountPath: /usr/share/nginx/html/rbd
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '10.0.0.51:6789'
            - '10.0.0.52:6789'
            - '10.0.0.53:6789'
            pool: k8s-rbd-pool
            image: k8s-img
            fsType: xfs
            readOnly: false
            user: k8s
            secretRef:
              name: ceph-secret-k8s

Apply the manifest:

[root@k8s-master1 ceph-case]#kubectl apply -f case4-nginx-secret.yaml 
deployment.apps/nginx-deployment created

[root@k8s-master1 ceph-case]#kubectl get pod -owide
NAME                                 READY   STATUS    RESTARTS       AGE     IP               NODE        NOMINATED NODE   READINESS GATES
nginx-deployment-9c7889fdd-dtzbt     1/1     Running   0              2m18s   10.200.159.136   10.0.0.11   <none>           <none>
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (155m ago)   12d     10.200.224.3     10.0.0.12   <none>           <none>
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (166m ago)   14d     10.200.159.134   10.0.0.11   <none>           <none>

Verify the mount inside the pod:

# Enter the pod
[root@k8s-master1 ceph-case]#kubectl exec -it nginx-deployment-9c7889fdd-dtzbt bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-9c7889fdd-dtzbt:/# df -h
Filesystem      Size  Used Avail Use% Mounted on
overlay          77G   13G   65G  16% /
tmpfs            64M     0   64M   0% /dev
tmpfs           1.9G     0  1.9G   0% /sys/fs/cgroup
shm              64M     0   64M   0% /dev/shm
/dev/sda4        77G   13G   65G  16% /etc/hosts
tmpfs           3.6G   12K  3.6G   1% /run/secrets/kubernetes.io/serviceaccount
/dev/rbd0       3.0G   54M  3.0G   2% /usr/share/nginx/html/rbd					# the RBD mount
tmpfs           1.9G     0  1.9G   0% /proc/acpi
tmpfs           1.9G     0  1.9G   0% /proc/scsi
tmpfs           1.9G     0  1.9G   0% /sys/firmware

Verify the mapping on the host:

[root@k8s-master1 ~]#rbd showmapped 
id  pool          namespace  image    snap  device   
0   k8s-rbd-pool             k8s-img  -     /dev/rbd0

1.8 Dynamic storage volumes (requires a k8s cluster installed from binaries)

Storage volumes can be provisioned dynamically by the kube-controller-manager component, which suits stateful services that need multiple storage volumes.

Define the Ceph admin user's key as a k8s secret, so k8s can call Ceph with admin privileges to create storage volumes dynamically. Images no longer need to be created in advance; k8s asks Ceph to create them at the moment they are needed.
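
A note on the "installed from binaries" requirement in the heading: the in-tree kubernetes.io/rbd provisioner runs inside kube-controller-manager, which therefore must be able to execute the rbd client binary. With a containerized control plane (e.g. kubeadm), the controller-manager image generally does not contain rbd, so dynamic provisioning with this provisioner fails there. A quick check on each master:

# kube-controller-manager must be able to find the rbd client installed in 1.3
which rbd && rbd --version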

1.8.1 Create the admin user secret

  1. Get the admin user's key and base64-encode it
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth print-key client.admin | base64
QVFEYVFBdGxHVVVRQWhBQTdrSmRFd3dOdnVDNFNEZDNYdDczMWc9PQ==
  2. Write the YAML
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFEYVFBdGxHVVVRQWhBQTdrSmRFd3dOdnVDNFNEZDNYdDczMWc9PQ==
  3. Create and verify
[root@k8s-master1 ceph-case]#kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created

[root@k8s-master1 ceph-case]#kubectl get secret
NAME                TYPE                DATA   AGE
ceph-secret-admin   kubernetes.io/rbd   1      7s
ceph-secret-k8s     kubernetes.io/rbd   1      18m
mysecret            Opaque              2      16d

1.8.2 Create the regular user's secret

This user is used for reading and writing data on the storage; the procedure is the same as shown earlier.

# Get the regular user's key and base64-encode it
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth print-key client.k8s | base64
QVFCTk9CTmxyZHpiSGhBQUsvbHA4YUFxdlVFRUc1VkFybE9DelE9PQ==

# The secret YAML file
[root@k8s-master1 ceph-case]#cat case3-secret-client-k8s.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-k8s
type: "kubernetes.io/rbd"
data:
  key: QVFCTk9CTmxyZHpiSGhBQUsvbHA4YUFxdlVFRUc1VkFybE9DelE9PQ==

# Create the secret
[root@k8s-master1 ceph-case]#kubectl apply -f case3-secret-client-k8s.yaml

1.8.3 Create the storage class

Create a dynamic storage class that provides dynamically provisioned PVCs to pods.

  1. Write the storage class YAML
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-k8s
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" 	# not the default storage class; set to "true" to make it the default
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.0.0.51:6789,10.0.0.52:6789,10.0.0.53:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default 
  pool: k8s-rbd-pool
  userId: k8s
  userSecretName: ceph-secret-k8s
  2. Create the storage class and verify
# Apply the manifest
[root@k8s-master1 ceph-case]#kubectl apply -f case6-ceph-storage-class.yaml 
storageclass.storage.k8s.io/ceph-storage-class-k8s created

# Verify the storage class
[root@k8s-master1 ceph-case]#kubectl get storageclass
NAME                     PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-k8s   kubernetes.io/rbd   Delete          Immediate           false                  21s

1.8.4 Create a PVC based on the storage class

  1. Write the YAML
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-k8s 
  resources:
    requests:
      storage: '5Gi'
  2. Create the PVC
[root@k8s-master1 ceph-case]#kubectl apply -f case7-mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created
  3. Verify the PV/PVC
# Check the PVC
[root@k8s-master1 ceph-case]#kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS             AGE
mysql-data-pvc   Bound    pvc-009abefc-c5f3-4db5-9dc7-713db8781297   5Gi        RWO            ceph-storage-class-k8s   8s

# Check the PV
[root@k8s-master1 ceph-case]#kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                         STORAGECLASS             REASON   AGE
mysql-datadir-1                            50Gi       RWO            Retain           Bound    web/data-mysql-0                                                19d
mysql-datadir-2                            50Gi       RWO            Retain           Bound    web/data-mysql-1                                                19d
mysql-datadir-3                            50Gi       RWO            Retain           Bound    web/data-mysql-2                                                19d
pvc-009abefc-c5f3-4db5-9dc7-713db8781297   5Gi        RWO            Delete           Bound    default/mysql-data-pvc        ceph-storage-class-k8s            8s
  4. Verify on Ceph that the image was created automatically
cephadmin@ceph-deploy:/data/ceph-cluster$ rbd ls --pool k8s-rbd-pool
k8s-img
kubernetes-dynamic-pvc-9a8a0e41-1575-4105-adfa-d2a4b049f470				# dynamically created
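
The dynamically provisioned image can be inspected like any other (name taken from the listing above):

rbd --pool k8s-rbd-pool info kubernetes-dynamic-pvc-9a8a0e41-1575-4105-adfa-d2a4b049f470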

1.8.5 Run a single-instance MySQL

  1. Write the YAML file
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6.46
        name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 33306
  selector:
    app: mysql
  2. Apply the manifest
[root@k8s-master1 ceph-case]#kubectl apply -f case8-mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service created
  3. Verify the MySQL mount
# Get the pod name
[root@k8s-master1 ceph-case]#kubectl get pod
NAME                                 READY   STATUS    RESTARTS        AGE
mysql-77d55bfdd8-f9rg6               1/1     Running   0               59s
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (4h38m ago)   12d
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (4h49m ago)   14d

# Enter the MySQL pod
[root@k8s-master1 ceph-case]#kubectl exec -it mysql-77d55bfdd8-f9rg6 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# Check the mounts
root@mysql-77d55bfdd8-f9rg6:/# df -Th
Filesystem     Type     Size  Used Avail Use% Mounted on
overlay        overlay   17G   12G  5.3G  69% /
tmpfs          tmpfs     64M     0   64M   0% /dev
tmpfs          tmpfs    971M     0  971M   0% /sys/fs/cgroup
shm            tmpfs     64M     0   64M   0% /dev/shm
/dev/sda4      xfs       17G   12G  5.3G  69% /etc/hosts
/dev/rbd0      ext4     4.9G  110M  4.8G   3% /var/lib/mysql		# the RBD mount
tmpfs          tmpfs    1.7G   12K  1.7G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs          tmpfs    971M     0  971M   0% /proc/acpi
tmpfs          tmpfs    971M     0  971M   0% /proc/scsi
tmpfs          tmpfs    971M     0  971M   0% /sys/firmware
  4. Verify MySQL access

Verify the service:

[root@k8s-master1 ceph-case]#kubectl get svc
NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes               ClusterIP   10.100.0.1       <none>        443/TCP          25d
mysql-service            NodePort    10.100.206.235   <none>        3306:33306/TCP   4m20s
nodeport-nginx-service   NodePort    10.100.161.230   <none>        80:30120/TCP     16d
tomcat-service           NodePort    10.100.48.218    <none>        80:31080/TCP     16d

Connect to MySQL:

# Install the client
[root@k8s-deploy /]#apt install mysql-client

# Connect to MySQL and run a few commands to verify
[root@k8s-deploy /]#mysql -uroot -p123456 -h10.0.0.13 -P33306
mysql> use mysql;
Database changed
mysql> show tables;
+---------------------------+
| Tables_in_mysql           |
+---------------------------+
| columns_priv              |
| db                        |
| event                     |
| func                      |
| general_log               |
| help_category             |
| help_keyword              |
| help_relation             |
| help_topic                |
| innodb_index_stats        |
| innodb_table_stats        |
| ndb_binlog_index          |
| plugin                    |
| proc                      |
| procs_priv                |
| proxies_priv              |
| servers                   |
| slave_master_info         |
| slave_relay_log_info      |
| slave_worker_info         |
| slow_log                  |
| tables_priv               |
| time_zone                 |
| time_zone_leap_second     |
| time_zone_name            |
| time_zone_transition      |
| time_zone_transition_type |
| user                      |
+---------------------------+
28 rows in set (0.00 sec)
mysql> 
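
To confirm the data really lives on the RBD image, create a database, delete the pod, and check that the database is still there after the Deployment recreates the pod (a sketch; the database name is arbitrary):

mysql> CREATE DATABASE persist_test;

# Delete the pod; the Deployment recreates it and re-attaches the same PVC
[root@k8s-master1 ceph-case]#kubectl delete pod mysql-77d55bfdd8-f9rg6

# Reconnect and confirm the database survived
[root@k8s-deploy /]#mysql -uroot -p123456 -h10.0.0.13 -P33306 -e 'SHOW DATABASES;'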

2. Data persistence based on CephFS

Pods in k8s mount CephFS shared storage from Ceph to give application data sharing, persistence, high performance, and high availability.

2.1 Create the secret

Create a secret for the Ceph admin user (the procedure is the same as in 1.8.1).

  1. Get the admin user's key and base64-encode it
cephadmin@ceph-deploy:/data/ceph-cluster$ ceph auth print-key client.admin | base64
QVFEYVFBdGxHVVVRQWhBQTdrSmRFd3dOdnVDNFNEZDNYdDczMWc9PQ==
  2. Write the YAML
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFEYVFBdGxHVVVRQWhBQTdrSmRFd3dOdnVDNFNEZDNYdDczMWc9PQ==
  3. Apply the manifest
[root@k8s-master1 ceph-case]#kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created

2.2 Create the pod

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: k8s-staticdata-cephfs 
          mountPath: /usr/share/nginx/html/cephfs
      volumes:
        - name: k8s-staticdata-cephfs
          cephfs:
            monitors:
            - '10.0.0.51:6789'
            - '10.0.0.52:6789'
            - '10.0.0.53:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: ng-deploy-80-service-label
  name: ng-deploy-80-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 33380
  selector:
    app: ng-deploy-80

Apply the manifest:

[root@k8s-master1 ceph-case]#kubectl apply -f case9-nginx-cephfs.yaml 
deployment.apps/nginx-deployment created
service/ng-deploy-80-service created

2.3 Verify the mount

Enter a pod and check:

[root@k8s-master1 ceph-case]#kubectl get pod
NAME                                 READY   STATUS    RESTARTS        AGE
nginx-deployment-68c749cd46-9g78v    1/1     Running   0               3m11s
nginx-deployment-68c749cd46-l8tnj    1/1     Running   0               3m11s
nginx-deployment-68c749cd46-v5n96    1/1     Running   0               3m11s
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (4h13m ago)   12d
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (4h24m ago)   14d

# Enter the pod
[root@k8s-master1 ceph-case]#kubectl exec -it nginx-deployment-68c749cd46-9g78v bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# Check the mounts
root@nginx-deployment-68c749cd46-9g78v:/# df -Th
Filesystem                                     Type     Size  Used Avail Use% Mounted on
overlay                                        overlay   17G  7.3G  9.7G  43% /
tmpfs                                          tmpfs     64M     0   64M   0% /dev
tmpfs                                          tmpfs    1.9G     0  1.9G   0% /sys/fs/cgroup
shm                                            tmpfs     64M     0   64M   0% /dev/shm
/dev/sda4                                      xfs       17G  7.3G  9.7G  43% /etc/hosts
tmpfs                                          tmpfs    3.6G   12K  3.6G   1% /run/secrets/kubernetes.io/serviceaccount
10.0.0.51:6789,10.0.0.52:6789,10.0.0.53:6789:/ ceph     6.4T  200M  6.4T   1% /usr/share/nginx/html/cephfs				# the CephFS mount
tmpfs                                          tmpfs    1.9G     0  1.9G   0% /proc/acpi
tmpfs                                          tmpfs    1.9G     0  1.9G   0% /proc/scsi
tmpfs                                          tmpfs    1.9G     0  1.9G   0% /sys/firmware

2.4 Multi-replica verification

[root@k8s-master1 ceph-case]#kubectl get pod -owide
NAME                                 READY   STATUS    RESTARTS        AGE     IP               NODE        NOMINATED NODE   READINESS GATES
nginx-deployment-68c749cd46-9g78v    1/1     Running   0               3m57s   10.200.135.199   10.0.0.13   <none>           <none>
nginx-deployment-68c749cd46-l8tnj    1/1     Running   0               3m57s   10.200.224.8     10.0.0.12   <none>           <none>
nginx-deployment-68c749cd46-v5n96    1/1     Running   0               3m57s   10.200.107.209   10.0.0.43   <none>           <none>
tomcat-deployment-68d695f995-8b7wk   1/1     Running   2 (4h14m ago)   12d     10.200.224.3     10.0.0.12   <none>           <none>
tomcat-deployment-68d695f995-qq7x8   1/1     Running   2 (4h25m ago)   14d     10.200.159.134   10.0.0.11   <none>           <none>
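
Since all three replicas mount the same CephFS path, a file written from one pod is immediately visible in the others (pod names from the listing above; the file name is arbitrary):

[root@k8s-master1 ceph-case]#kubectl exec nginx-deployment-68c749cd46-9g78v -- sh -c 'echo "cephfs shared" > /usr/share/nginx/html/cephfs/test.html'
[root@k8s-master1 ceph-case]#kubectl exec nginx-deployment-68c749cd46-l8tnj -- cat /usr/share/nginx/html/cephfs/test.html
cephfs shared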

2.5 Verify on the hosts

# Mount on master2
[root@k8s-master2 kubelet]#df -Th|grep "ceph"
10.0.0.51:6789,10.0.0.52:6789,10.0.0.53:6789:/ ceph      6.4T  200M  6.4T   1% /var/lib/kubelet/pods/20b3767d-9010-4e96-8c5f-0a6b95008425/volumes/kubernetes.io~cephfs/k8s-staticdata-cephfs

# Mount on master3
[root@k8s-master3 ~]#df -Th|grep ceph
10.0.0.51:6789,10.0.0.52:6789,10.0.0.53:6789:/ ceph      6.4T  200M  6.4T   1% /var/lib/kubelet/pods/2884da59-5afb-4715-bf25-cfd9f804468a/volumes/kubernetes.io~cephfs/k8s-staticdata-cephfs

# Mount on node3
[root@k8s-node3 ~]#df -Th|grep ceph
10.0.0.51:6789,10.0.0.52:6789,10.0.0.53:6789:/ ceph      6.4T  200M  6.4T   1% /var/lib/kubelet/pods/3985e61c-d9c9-4ba4-a826-74362c0856a8/volumes/kubernetes.io~cephfs/k8s-staticdata-cephfs