OpenStack Cinder: dynamic cloud disk resizing with LVM and NFS back ends

Published 2023-10-08 09:15:17    Author: 小糊涂90

 

#Official Cinder deployment reference documentation
https://docs.openstack.org/cinder/train/install/cinder-controller-install-rdo.html

1.#Prepare the database:
[root@openstack-mysql ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 3581
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE cinder;
Query OK, 1 row affected (0.000 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder123';
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> exit
Bye

#Verify remote access from the controller
[root@openstack-controller1 ~]# mysql -h10.0.0.103 -ucinder -pcinder123
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 3610
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]>


2.#Register the cinder service on the controller:
#Create the cinder user
[root@openstack-controller1 ~]# . admin-openrc
[root@openstack-controller1 ~]# openstack user create --domain default --password-prompt cinder
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 607c1e56870741d99977d0f3dd4779b0 |
| name | cinder |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
#Add the admin role to the cinder user:
[root@openstack-controller1 ~]# openstack role add --project service --user cinder admin

#Create the cinderv2 and cinderv3 service entities:
[root@openstack-controller1 ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | 1af9a9b00d1b495498ae30f4dc7bfbcc |
| name | cinderv2 |
| type | volumev2 |
+-------------+----------------------------------+
[root@openstack-controller1 ~]# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | dd0ba0dbeda841928aab3ccb2da22ab4 |
| name | cinderv3 |
| type | volumev3 |
+-------------+----------------------------------+

#Create the Block Storage service API endpoints:
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev2 public http://openstack-vip.tan.local:8776/v2/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | 83901fdeeba7472285c1cdb6fde0f007 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1af9a9b00d1b495498ae30f4dc7bfbcc |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://openstack-vip.tan.local:8776/v2/%(project_id)s |
+--------------+-------------------------------------------------------+
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev2 internal http://openstack-vip.tan.local:8776/v2/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | 506b9cd468b64fccb68298f2c85b7ce9 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1af9a9b00d1b495498ae30f4dc7bfbcc |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://openstack-vip.tan.local:8776/v2/%(project_id)s |
+--------------+-------------------------------------------------------+
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev2 admin http://openstack-vip.tan.local:8776/v2/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | 806855a98bbd4e70a77004917b922ad6 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 1af9a9b00d1b495498ae30f4dc7bfbcc |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://openstack-vip.tan.local:8776/v2/%(project_id)s |
+--------------+-------------------------------------------------------+
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev3 public http://openstack-vip.tan.local:8776/v3/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | e7515be9b4204193b871f7add08da7dc |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | dd0ba0dbeda841928aab3ccb2da22ab4 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://openstack-vip.tan.local:8776/v3/%(project_id)s |
+--------------+-------------------------------------------------------+
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev3 internal http://openstack-vip.tan.local:8776/v3/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | 29069ca6cb734db5819db5db7a249cbf |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | dd0ba0dbeda841928aab3ccb2da22ab4 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://openstack-vip.tan.local:8776/v3/%(project_id)s |
+--------------+-------------------------------------------------------+
[root@openstack-controller1 ~]# openstack endpoint create --region RegionOne volumev3 admin http://openstack-vip.tan.local:8776/v3/%\(project_id\)s
+--------------+-------------------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------------------+
| enabled | True |
| id | 8912991b5bdf42b7ad5218197dbc04d9 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | dd0ba0dbeda841928aab3ccb2da22ab4 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://openstack-vip.tan.local:8776/v3/%(project_id)s |
+--------------+-------------------------------------------------------+
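
Optionally, the registered endpoints can be listed to confirm that all six were created (a quick sanity check using standard openstackclient filtering):
# openstack endpoint list --service volumev2
# openstack endpoint list --service volumev3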

3.#Configure load balancing
#Append a listener for the cinder service on port 8776 at the end of the configuration
[root@openstack-ha1 ~]# cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2

chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon

# turn on stats unix socket
stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 30000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
listen openstack-mysql-3306
bind 10.0.0.188:3306
mode tcp
server 10.0.0.103 10.0.0.103:3306 check inter 3s fall 3 rise 5

listen openstack-rabbitmq-5672
bind 10.0.0.188:5672
mode tcp
server 10.0.0.103 10.0.0.103:5672 check inter 3s fall 3 rise 5

listen openstack-memcached-11211
bind 10.0.0.188:11211
mode tcp
server 10.0.0.103 10.0.0.103:11211 check inter 3s fall 3 rise 5

listen openstack-keystone-5000
bind 10.0.0.188:5000
mode tcp
server 10.0.0.101 10.0.0.101:5000 check inter 3s fall 3 rise 5

listen openstack-glance-9292
bind 10.0.0.188:9292
mode tcp
server 10.0.0.101 10.0.0.101:9292 check inter 3s fall 3 rise 5

listen openstack-placement-8778
bind 10.0.0.188:8778
mode tcp
server 10.0.0.101 10.0.0.101:8778 check inter 3s fall 3 rise 5

listen openstack-novacontroller-8774
bind 10.0.0.188:8774
mode tcp
server 10.0.0.101 10.0.0.101:8774 check inter 3s fall 3 rise 5

listen openstack-nova-novncproxy-6080
bind 10.0.0.188:6080
mode tcp
server 10.0.0.101 10.0.0.101:6080 check inter 3s fall 3 rise 5

listen openstack-neutron-controller-9696
bind 10.0.0.188:9696
mode tcp
server 10.0.0.101 10.0.0.101:9696 check inter 3s fall 3 rise 5

listen openstack-dashboard-80
bind 10.0.0.188:80
mode tcp
server 10.0.0.101 10.0.0.101:80 check inter 3s fall 3 rise 5

listen openstack-nova-api-8775
bind 10.0.0.188:8775
mode tcp
server 10.0.0.101 10.0.0.101:8775 check inter 3s fall 3 rise 5

listen openstack-nova-cinder-8776
bind 10.0.0.188:8776
mode tcp
server 10.0.0.101 10.0.0.101:8776 check inter 3s fall 3 rise 5
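
Optionally, the new configuration can be syntax-checked before restarting haproxy (a minimal sketch using haproxy's standard check mode):
# haproxy -c -f /etc/haproxy/haproxy.cfg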

[root@openstack-ha1 ~]# systemctl restart haproxy
[root@openstack-ha1 ~]# ss -tnl |grep 8776
LISTEN 0 128 10.0.0.188:8776 *:*

4.#Install and configure the cinder components; install cinder on the controller:
#Install the package
[root@openstack-controller1 ~]#yum install openstack-cinder -y

Edit the /etc/cinder/cinder.conf file and complete the following steps:

In the [database] section, configure database access:

[database]
# ...
connection = mysql+pymysql://cinder:cinder123@openstack-vip.tan.local/cinder
Here cinder123 takes the place of CINDER_DBPASS, the password chosen for the Block Storage database.

In the [DEFAULT] section, configure RabbitMQ message queue access:

[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack123@openstack-vip.tan.local

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

[DEFAULT]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://openstack-vip.tan.local:5000
auth_url = http://openstack-vip.tan.local:5000
memcached_servers = openstack-vip.tan.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

In the [DEFAULT] section, the my_ip option can be set to the management interface IP address of the controller node (left commented out here):
[DEFAULT]
# ...
#my_ip = 10.0.0.11

In the [oslo_concurrency] section, configure the lock path:
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
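
For reference, the controller-side sections edited above come together as the following sketch of /etc/cinder/cinder.conf (values taken from this deployment; adjust hostnames and passwords to your own environment):

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.tan.local
auth_strategy = keystone

[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.tan.local/cinder

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.tan.local:5000
auth_url = http://openstack-vip.tan.local:5000
memcached_servers = openstack-vip.tan.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp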

#Populate the Block Storage database:
# su -s /bin/sh -c "cinder-manage db sync" cinder

#Verify
MariaDB [cinder]> show tables;
+----------------------------+
| Tables_in_cinder |
+----------------------------+
| attachment_specs |
| backup_metadata |
| backups |
| cgsnapshots |
| clusters |
| consistencygroups |
| driver_initiator_data |
| encryption |
......
#Restart the nova-api service
[root@openstack-controller1 ~]# systemctl restart openstack-nova-api.service
#Enable and start the Block Storage services
[root@openstack-controller1 ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
[root@openstack-controller1 ~]# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
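
Once cinder-api is up, a quick reachability check through the VIP can be done with curl; the API root should answer with a JSON document listing the supported versions (assuming the 8776 haproxy listener configured above is active):
# curl http://openstack-vip.tan.local:8776/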

5.#Configure the compute nodes to use cinder storage; edit the nova configuration file and restart nova:
[root@openstack-compute1 ~]# vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
[root@openstack-compute1 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
[root@openstack-compute2 ~]# vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
[root@openstack-compute2 ~]# systemctl restart libvirtd.service openstack-nova-compute.service

#Verify from the cinder controller
[root@openstack-controller1 ~]# openstack volume service list
+------------------+---------------------------------+------+---------+-------+----------------------------+
| Binary | Host | Zone | Status | State | Updated At |
+------------------+---------------------------------+------+---------+-------+----------------------------+
| cinder-scheduler | openstack-controller1.tan.local | nova | enabled | up | 2022-09-22T01:49:20.000000 |
+------------------+---------------------------------+------+---------+-------+----------------------------+

6.Configure the storage node, implemented with LVM
#Here the ha1 load-balancer node is reused as the storage node; a 200 GB disk has been added to it
#Install the LVM packages
[root@openstack-ha1 ~]# yum install lvm2 device-mapper-persistent-data

#Enable and start the LVM metadata service
[root@openstack-ha1 ~]# systemctl enable lvm2-lvmetad.service
[root@openstack-ha1 ~]# systemctl start lvm2-lvmetad.service

#Rescan the SCSI buses so the new disk is detected without a reboot
[root@openstack-ha1 ~]# echo "- - -" > /sys/class/scsi_host/host0/scan
[root@openstack-ha1 ~]# echo "- - -" > /sys/class/scsi_host/host1/scan
[root@openstack-ha1 ~]# echo "- - -" > /sys/class/scsi_host/host2/scan

#Verify the disk
[root@openstack-ha1 ~]# fdisk -l /dev/sdb
Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

#Create the LVM physical volume and volume group:
[root@openstack-ha1 ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.
[root@openstack-ha1 ~]# vgcreate cinder-volumes /dev/sdb
Volume group "cinder-volumes" successfully created

#Configure an LVM device filter so that only the cinder-volumes device is scanned and only instances can access the volumes:
[root@openstack-ha1 ~]# vim /etc/lvm/lvm.conf
filter = [ "a/sdb/", "r/.*/"]

#Install and configure the components on the storage node
#Install the packages:
[root@openstack-ha1 ~]# yum install openstack-cinder targetcli python-keystone
Edit the /etc/cinder/cinder.conf file and complete the following steps:
[root@openstack-ha1 ~]#vim /etc/cinder/cinder.conf
In the [database] section, configure database access:

[database]
# ...
connection = mysql+pymysql://cinder:cinder123@openstack-vip.tan.local/cinder
Here cinder123 takes the place of CINDER_DBPASS, the password chosen for the Block Storage database.

In the [DEFAULT] section, configure RabbitMQ message queue access:

[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack123@openstack-vip.tan.local
Here openstack123 takes the place of RABBIT_PASS, the password chosen for the openstack account in RabbitMQ.

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

[DEFAULT]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://openstack-vip.tan.local:5000
auth_url = http://openstack-vip.tan.local:5000
memcached_servers = openstack-vip.tan.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
Here the password is the one chosen for the cinder user in the Identity service (CINDER_PASS in the official guide).

Note

Comment out or remove any other options in the [keystone_authtoken] section.

In the [DEFAULT] section, configure the my_ip option:

[DEFAULT]
# ...
my_ip = 10.0.0.105
This is the IP address of the management network interface on the storage node (MANAGEMENT_INTERFACE_IP_ADDRESS in the official guide, where the example architecture uses 10.0.0.41 for the first node); here it is 10.0.0.105.

In the [lvm] section, configure the LVM back end with the LVM driver, the cinder-volumes volume group, the iSCSI protocol, and the appropriate iSCSI service. If the [lvm] section does not exist, create it:

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
In the [DEFAULT] section, enable the LVM back end:

[DEFAULT]
# ...
enabled_backends = lvm
Note

Back-end names are arbitrary; this guide uses the driver name as the back-end name.

In the [DEFAULT] section, configure the location of the Image service API:

[DEFAULT]
# ...
glance_api_servers = http://openstack-vip.tan.local:9292
In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp

#Add the VIP hostname to the hosts file, otherwise the SQL connection will fail
[root@openstack-ha1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.188 openstack-vip.tan.local
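
Name resolution and database reachability through the VIP can be confirmed before starting the service (a minimal check using the credentials created earlier):
# getent hosts openstack-vip.tan.local
# mysql -h openstack-vip.tan.local -ucinder -pcinder123 -e 'SELECT 1;'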

#Finalize the installation
#Start the Block Storage volume service and its dependencies, and configure them to start at boot:

[root@openstack-ha1 ~]# systemctl enable openstack-cinder-volume.service target.service
[root@openstack-ha1 ~]# systemctl start openstack-cinder-volume.service target.service

#Check the volume log
[root@openstack-ha1 ~]# tail -f /var/log/cinder/volume.log
2022-09-22 10:14:42.804 30830 INFO cinder.volume.manager [req-c36fd7ab-f1dd-402b-a01c-7d36ddc3648e - - - - -] Driver initialization completed successfully.
2022-09-22 10:14:42.812 30830 INFO cinder.manager [req-c36fd7ab-f1dd-402b-a01c-7d36ddc3648e - - - - -] Initiating service 2 cleanup
2022-09-22 10:14:42.818 30830 INFO cinder.manager [req-c36fd7ab-f1dd-402b-a01c-7d36ddc3648e - - - - -] Service 2 cleanup completed.
2022-09-22 10:14:42.875 30830 INFO cinder.volume.manager [req-c36fd7ab-f1dd-402b-a01c-7d36ddc3648e - - - - -] Initializing RPC dependent components of volume driver LVMVolumeDriver (3.0.0)
2022-09-22 10:14:43.804 30830 INFO cinder.volume.manager [req-c36fd7ab-f1dd-402b-a01c-7d36ddc3648e - - - - -] Driver post RPC initialization completed successfully.

#Verify from the controller
[root@openstack-controller1 ~]# openstack volume service list
+------------------+---------------------------------+------+---------+-------+----------------------------+
| Binary | Host | Zone | Status | State | Updated At |
+------------------+---------------------------------+------+---------+-------+----------------------------+
| cinder-scheduler | openstack-controller1.tan.local | nova | enabled | up | 2022-09-22T02:16:11.000000 |
| cinder-volume | openstack-ha1.tan.local@lvm | nova | enabled | up | 2022-09-22T02:16:13.000000 |
+------------------+---------------------------------+------+---------+-------+----------------------------+

#Dynamic resizing of an LVM-backed cloud disk
#Create two instances

[root@openstack-controller1 ~]# openstack server create --flavor centos-template --image centos7-image --nic net-id=86a33d1d-a373-4c7d-90fe-31c4d3018546 --security-group e2a17048-c290-443d-bdb9-7ad34cb43e5d --key-name mykey centos7

[root@openstack-controller1 ~]# openstack server create --flavor centos-template --image centos7-image --nic net-id=86a33d1d-a373-4c7d-90fe-31c4d3018546 --security-group e2a17048-c290-443d-bdb9-7ad34cb43e5d --key-name mykey centos77

[root@openstack-controller1 ~]# openstack server list
+--------------------------------------+----------+--------+------------------------+---------------+-----------------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+----------+--------+------------------------+---------------+-----------------+
| 6cca6943-cc44-49e6-8af2-d063713b676d | centos77 | ACTIVE | external-net=10.0.0.88 | centos7-image | centos-template |
| d847736f-aa92-4da2-b0c0-246da50a4c02 | centos7 | ACTIVE | external-net=10.0.0.89 | centos7-image | centos-template |
+--------------------------------------+----------+--------+------------------------+---------------+-----------------+

#Dashboard: Project -> Volumes -> Volumes -> Create Volume -> size 3 GB

#Dashboard: Project -> Compute -> Instances -> select an instance -> drop-down menu on the right -> Attach Volume
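
The same two steps can also be done from the CLI (a hedged equivalent; the volume name test-volume is only an example):
# openstack volume create --size 3 test-volume
# openstack server add volume centos77 test-volume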

#Verify inside the VM
[root@centos77 ~]# fdisk -l /dev/vdb
Disk /dev/vdb: 3221 MB, 3221225472 bytes, 6291456 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

#Format the disk, mount it, and write some test data
[root@centos77 ~]# mkfs.xfs /dev/vdb
meta-data=/dev/vdb isize=512 agcount=4, agsize=196608 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=786432, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@centos77 ~]# mount /dev/vdb /mnt
[root@centos77 ~]# echo test 10.0.0.88 >/mnt/index.html
[root@centos77 ~]# cat /mnt/index.html
test 10.0.0.88

#Unmount the volume
[root@centos77 ~]# umount /mnt

#Detach the volume: Dashboard -> Project -> Compute -> Instances -> Detach Volume -> select the 3 GB volume

#Dashboard -> Project -> Volumes -> Volumes -> Extend Volume -> extend to 5 GB

#Dashboard -> Project -> Volumes -> Volumes -> Manage Attachments -> attach to the other instance
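
The detach / extend / reattach cycle has a CLI equivalent as well (a hedged sketch following the same order as the dashboard steps above; test-volume is again only an example name):
# openstack server remove volume centos77 test-volume
# openstack volume set --size 5 test-volume
# openstack server add volume centos7 test-volume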

#Verify the disk on the other instance
[root@centos7 ~]# fdisk -l /dev/vdb
Disk /dev/vdb: 5368 MB, 5368709120 bytes, 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

#Verify the data and the current filesystem size
[root@centos7 ~]# mount /dev/vdb /mnt
[root@centos7 ~]# cat /mnt/index.html
test 10.0.0.88
[root@centos7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 908M 0 908M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 8.5M 911M 1% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
/dev/mapper/centos-root 27G 1.5G 26G 6% /
/dev/vda1 1014M 150M 865M 15% /boot
tmpfs 184M 0 184M 0% /run/user/0
/dev/vdb 3.0G 33M 3.0G 2% /mnt

#Grow the XFS filesystem so it uses the new space
[root@centos7 ~]# xfs_growfs /dev/vdb
meta-data=/dev/vdb isize=512 agcount=4, agsize=196608 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=786432, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 786432 to 1310720
[root@centos7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 908M 0 908M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 8.5M 911M 1% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
/dev/mapper/centos-root 27G 1.5G 26G 6% /
/dev/vda1 1014M 150M 865M 15% /boot
tmpfs 184M 0 184M 0% /run/user/0
/dev/vdb 5.0G 33M 5.0G 1% /mnt

Using NFS as the OpenStack back-end storage

#The section above showed cinder providing back-end storage through LVM volumes; this section shows cinder using NFS as the back end.

1.#Install the NFS server; here the NFS service is installed on the MySQL server
[root@openstack-mysql ~]# yum install nfs-utils rpcbind
[root@openstack-mysql ~]# mkdir /nfsdata/
[root@openstack-mysql ~]# vim /etc/exports
/nfsdata *(rw,no_root_squash)
[root@openstack-mysql ~]# systemctl start nfs
[root@openstack-mysql ~]# systemctl enable nfs
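
The export can be verified on the NFS server before wiring it into cinder (standard nfs-utils commands):
# exportfs -v
# showmount -e localhost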

2.#Configure cinder to use NFS:
#Edit the main configuration file:
[root@openstack-controller1 ~]# vim /etc/cinder/cinder.conf
#Add to [DEFAULT]
enabled_backends = nfs
#Append at the end
[nfs]
volume_backend_name = openstack-NFS #back-end name, referenced later when associating volume types
volume_driver = cinder.volume.drivers.nfs.NfsDriver #NFS driver
nfs_shares_config = /etc/cinder/nfs_shares #file listing the NFS shares to mount
nfs_mount_point_base = $state_path/mnt #base directory for the NFS mount points

3.#Create the NFS shares configuration file:
[root@openstack-controller1 ~]# showmount -e 10.0.0.103
Export list for 10.0.0.103:
/nfsdata *

[root@openstack-controller1 ~]# vim /etc/cinder/nfs_shares
10.0.0.103:/nfsdata

[root@openstack-controller1 ~]# chown root.cinder /etc/cinder/nfs_shares
[root@openstack-controller1 ~]# systemctl restart openstack-cinder-volume.service

#Verify that the NFS share is mounted automatically:
[root@openstack-controller1 ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs tmpfs 3.9G 12M 3.9G 1% /run
tmpfs tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/mapper/centos-root xfs 15G 8.0G 7.1G 53% /
/dev/sda1 xfs 1014M 138M 877M 14% /boot
/dev/mapper/centos-home xfs 2.0G 33M 2.0G 2% /home
tmpfs tmpfs 797M 0 797M 0% /run/user/0
10.0.0.103:/nfsdata nfs4 15G 2.2G 13G 15% /var/lib/cinder/mnt/d0249c90bf1851a8e7199c54ea9417a9

#Check the cinder log:
[root@openstack-controller1 ~]#tail -n10 /var/log/cinder/*.log
2022-09-22 12:56:22.855 4465 INFO cinder.volume.manager [req-61bad113-3c87-48fc-b391-42b8ca903b54 - - - - -] Driver initialization completed successfully.
2022-09-22 12:56:22.863 4465 INFO cinder.manager [req-61bad113-3c87-48fc-b391-42b8ca903b54 - - - - -] Initiating service 3 cleanup
2022-09-22 12:56:22.868 4465 INFO cinder.manager [req-61bad113-3c87-48fc-b391-42b8ca903b54 - - - - -] Service 3 cleanup completed.
2022-09-22 12:56:22.932 4465 INFO cinder.volume.manager [req-61bad113-3c87-48fc-b391-42b8ca903b54 - - - - -] Initializing RPC dependent components of volume driver NfsDriver (1.4.0)
2022-09-22 12:56:23.691 4465 INFO cinder.volume.manager [req-61bad113-3c87-48fc-b391-42b8ca903b54 - - - - -] Driver post RPC initialization completed successfully.

#Verify both the NFS and LVM back ends:
[root@openstack-controller1 ~]# cinder service-list
+------------------+-------------------------------------+------+---------+-------+----------------------------+---------+-----------------+---------------+
| Binary | Host | Zone | Status | State | Updated_at | Cluster | Disabled Reason | Backend State |
+------------------+-------------------------------------+------+---------+-------+----------------------------+---------+-----------------+---------------+
| cinder-scheduler | openstack-controller1.tan.local | nova | enabled | up | 2022-09-22T04:58:30.000000 | - | - | |
| cinder-volume | openstack-controller1.tan.local@nfs | nova | enabled | up | 2022-09-22T04:58:23.000000 | - | - | up |
| cinder-volume | openstack-ha1.tan.local@lvm | nova | enabled | up | 2022-09-22T04:58:23.000000 | - | - | up |
+------------------+-------------------------------------+------+---------+-------+----------------------------+---------+-----------------+---------------+

#Create volume types and associate them with the back ends;
otherwise, when creating a volume in the OpenStack dashboard, there is no way to choose between NFS and the other back-end types:
#Create the types:
[root@openstack-controller1 ~]# cinder type-create lvm
+--------------------------------------+------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| 65520191-e3ad-4772-92e5-c7906da35e7f | lvm | - | True |
+--------------------------------------+------+-------------+-----------+
[root@openstack-controller1 ~]# cinder type-create nfs
+--------------------------------------+------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| d8758906-dc32-492b-aa6c-5d6566896301 | nfs | - | True |
+--------------------------------------+------+-------------+-----------+

#Associate the volume types with the back ends:
[root@openstack-controller1 ~]# source admin-openrc
[root@openstack-controller1 ~]# cinder type-key lvm set volume_backend_name=Openstack-lvm
[root@openstack-controller1 ~]# cinder type-key nfs set volume_backend_name=openstack-NFS
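
For these associations to actually steer volumes to a back end, the back-end section in cinder.conf must declare the same volume_backend_name: the [nfs] section above already sets volume_backend_name = openstack-NFS, and the [lvm] section on the storage node would need a matching line, followed by a restart of openstack-cinder-volume.service on that node (a hedged sketch, assuming the name Openstack-lvm chosen above):
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
volume_backend_name = Openstack-lvm

The associations can then be checked with:
# openstack volume type show lvm
# openstack volume type show nfs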

#Create an NFS volume: Dashboard -> Project -> Volumes -> Volumes -> Create Volume -> name: nfs -> type: nfs -> size: 2 GB -> Create Volume

#Manage attachments: Dashboard -> Project -> Volumes -> Volumes -> Manage Attachments -> attach to an instance

#Verify inside the VM:
[root@centos77 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 252:0 0 30G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 29G 0 part
├─centos-root 253:0 0 27G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
vdb 252:16 0 2G 0 disk

#Format and mount:
[root@centos77 ~]# mkfs.xfs /dev/vdb
meta-data=/dev/vdb isize=512 agcount=4, agsize=131072 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=524288, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@centos77 ~]# mount /dev/vdb /mnt
[root@centos77 ~]# echo test1111 >/mnt/index.html
[root@centos77 ~]# cat /mnt/index.html
test1111

#Detach the volume: Dashboard -> Project -> Volumes -> Volumes -> Manage Attachments -> Detach Volume

#Extend the volume: Dashboard -> Project -> Volumes -> Volumes -> Extend Volume -> size: 8 GB

#Attach to another instance: Dashboard -> Project -> Volumes -> Volumes -> Manage Attachments -> select instance -> Attach Volume

#Verify the data on the target instance and grow the filesystem:
[root@centos7 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 252:0 0 30G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 29G 0 part
├─centos-root 253:0 0 27G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
vdb 252:16 0 8G 0 disk
[root@centos7 ~]# mount /dev/vdb /mnt
[root@centos7 ~]# cat /mnt/index.html
test1111
[root@centos7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 908M 0 908M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 8.6M 911M 1% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
/dev/mapper/centos-root 27G 1.5G 26G 6% /
/dev/vda1 1014M 150M 865M 15% /boot
tmpfs 184M 0 184M 0% /run/user/0
/dev/vdb 2.0G 33M 2.0G 2% /mnt
[root@centos7 ~]# xfs_growfs /dev/vdb
meta-data=/dev/vdb isize=512 agcount=4, agsize=131072 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=524288, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 524288 to 2097152
[root@centos7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 908M 0 908M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 8.6M 911M 1% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
/dev/mapper/centos-root 27G 1.5G 26G 6% /
/dev/vda1 1014M 150M 865M 15% /boot
tmpfs 184M 0 184M 0% /run/user/0
/dev/vdb 8.0G 33M 8.0G 1% /mnt


#Verify the volume file on the NFS server:
[root@openstack-mysql ~]# ll /nfsdata/
total 10740
-rw-rw-rw- 1 107 107 8589934592 Sep 22 13:09 volume-25cf8ca9-9ea7-44d7-82ac-c741df8cfcd0
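
The listing shows an apparent size of 8 GB while the directory total is only about 10 MB, because the NFS driver creates sparse volume files by default; allocated versus apparent size can be compared on the NFS server (standard GNU coreutils options):
# du -h /nfsdata/volume-25cf8ca9-9ea7-44d7-82ac-c741df8cfcd0
# du -h --apparent-size /nfsdata/volume-25cf8ca9-9ea7-44d7-82ac-c741df8cfcd0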