Ceph 16 (Pacific) Deployment

Published 2023-07-17 23:25:23 · Author: A57

1. Initial Configuration

IP          Hostname
10.0.0.10   storage01
10.0.0.11   storage02
10.0.0.12   storage03

1.1 Configure the offline package source (all nodes)

tar zxvf ceph16pkg.tar.gz -C /opt/

cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph16pkg/debs/
EOF

apt clean
apt update
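For reference, the flat repository under /opt/ceph16pkg/debs/ can be regenerated if you ever add .deb files to the bundle. This is a sketch that assumes the dpkg-dev package (which provides dpkg-scanpackages) is available; it is only needed when rebuilding the offline bundle, not during deployment:

# Rebuild the package index after adding .deb files (assumes dpkg-dev is installed)
cd /opt/ceph16pkg/debs/
dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz
apt update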

1.2 Install basic tools (all nodes)

apt install -y bash-completion vim net-tools

1.3 Set the hostnames

  • storage01
hostnamectl set-hostname storage01
  • storage02
hostnamectl set-hostname storage02
  • storage03
hostnamectl set-hostname storage03

1.4 Server tuning (all nodes)

vim /etc/security/limits.conf
# Resource limits for the root account
root soft core unlimited
root hard core unlimited
root soft nproc 1000000
root hard nproc 1000000
root soft nofile 1000000
root hard nofile 1000000
root soft memlock 32000
root hard memlock 32000
root soft msgqueue 8192000
root hard msgqueue 8192000
# Resource limits for all other accounts
* soft core unlimited
* hard core unlimited
* soft nproc 1000000 # max number of processes
* hard nproc 1000000
* soft nofile 1000000 # max number of open files
* hard nofile 1000000
* soft memlock 32000 # max locked-in-memory address space (KB)
* hard memlock 32000
* soft msgqueue 8192000 # max bytes in POSIX message queues
* hard msgqueue 8192000
vim /etc/sysctl.conf
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename
# Useful for debugging multi-threaded application
kernel.core_uses_pid = 1
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
# Disable netfilter on bridges
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 0
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
# Controls the maximum message size, in bytes
kernel.msgmax = 65536
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
# Controls the maximum total shared memory, in pages
kernel.shmall = 4294967296
# TCP kernel parameters
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
# tcp_tw_recycle was removed in Linux 4.12; keep it commented out on modern kernels
#net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.ip_local_port_range = 10001 65000
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_filter = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
  • Reboot the servers for the changes to take effect
reboot
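If a full reboot is inconvenient, the sysctl changes can be applied in place; the limits.conf changes still only apply to new login sessions. A quick sanity check, assuming the values above:

# Apply the sysctl changes without rebooting
sysctl -p

# In a fresh login shell, verify the limits
ulimit -n   # expect 1000000
ulimit -u   # expect 1000000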

1.5 Configure hosts resolution (all nodes)

cat >> /etc/hosts <<EOF
10.0.0.10 storage01
10.0.0.11 storage02
10.0.0.12 storage03
EOF

1.6 Configure time synchronization

  • Install the service (all nodes)
apt install -y chrony
  • Configure storage01 as the NTP server
vim /etc/chrony/chrony.conf
pool storage01 iburst
allow all
local stratum 10

# Restart to apply
systemctl restart chronyd
  • Configure storage02/03 as clients
vim /etc/chrony/chrony.conf
pool storage01 iburst

# Restart to apply
systemctl restart chronyd

# Verify
chronyc sources -v
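Two further checks can confirm the clients are actually locked onto storage01; both are standard chronyc subcommands:

# On storage02/03: detailed synchronization status
chronyc tracking

# On storage01: list the clients that have polled this server
chronyc clients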

2. Install Basic Services (all nodes)

apt install -y docker-ce
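Since cephadm runs every Ceph daemon as a container, make sure the Docker daemon is enabled on boot before continuing:

# Enable and start docker, then verify it responds
systemctl enable --now docker
docker version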

3. Initialize the Cluster

3.1 Install the bootstrap tool

  • On storage01
apt install -y cephadm

3.2 Load the images (all nodes)

tar zxvf ceph16image.tar.gz

cd ceph16image/

for i in *; do docker load -i "$i"; done
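After the loop finishes, confirm the images were imported:

# The ceph and registry images from the ceph16image bundle should be listed
docker images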

3.3 Set up a local registry

  • On storage01
# 4bb5ea59f8e0 is the image ID of the registry image loaded in step 3.2
docker run -d --name registry -p 5000:5000 --restart always 4bb5ea59f8e0
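A quick way to confirm the registry is up is the Docker Registry v2 API:

# A fresh registry should return an empty list: {"repositories":[]}
curl http://10.0.0.10:5000/v2/_catalog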

3.4 Configure the registry address (all nodes)

cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["10.0.0.10:5000"]
}
EOF

systemctl daemon-reload
systemctl restart docker
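To confirm the setting was picked up on each node:

# 10.0.0.10:5000 should appear under "Insecure Registries"
docker info | grep -A 2 "Insecure Registries"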

3.5 Push the key image to the registry

  • On storage01
# Tag the image (327f301eff51 is the image ID of the loaded ceph v16 image)
docker tag 327f301eff51 10.0.0.10:5000/ceph:v16

# Push to the private registry
docker push 10.0.0.10:5000/ceph:v16
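The push can be verified against the registry API:

# Expected response: {"name":"ceph","tags":["v16"]}
curl http://10.0.0.10:5000/v2/ceph/tags/list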

3.6 Bootstrap the cluster

  • On storage01
# Create the directory that will hold the cluster configuration
mkdir /etc/ceph

# Bootstrap the cluster
cephadm --image 10.0.0.10:5000/ceph:v16 bootstrap --mon-ip 10.0.0.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull

=========================================================================
# To put cluster (replication) traffic on a dedicated network, bootstrap like this instead (the 172.21.48.x addresses come from a different example environment):
cephadm --image 172.21.48.10:5000/ceph:v16 bootstrap --mon-ip 172.21.48.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --cluster-network 10.0.0.0/24 --skip-pull
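When bootstrap completes it prints the dashboard URL (https://storage01:8443/ by default) and writes ceph.conf, ceph.client.admin.keyring, and ceph.pub into /etc/ceph. A minimal sanity check at this point:

# One mon and one mgr should be reported on storage01
cephadm shell -- ceph -s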

4. Add Hosts to the Cluster (storage01 node)

  • Distribute the cluster's SSH public key
ssh-copy-id -f -i /etc/ceph/ceph.pub storage02

ssh-copy-id -f -i /etc/ceph/ceph.pub storage03
  • Add the hosts to the cluster
cephadm shell

ceph orch host add storage02

ceph orch host add storage03
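Still inside cephadm shell, confirm all three hosts are registered:

# All three hosts should be listed
ceph orch host ls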

5. Add OSD Disks (storage01 node)

# List which disk devices are available
ceph orch device ls

# Add all available disks to the cluster as OSDs
ceph orch apply osd --all-available-devices


====================== To add a specific disk instead ======================
ceph orch daemon add osd storage02:/dev/sdb
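Either way, verify the OSDs came up and joined the CRUSH map:

# All OSDs should show as up/in
ceph osd tree
ceph -s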

6. Deploy MDS

  • CephFS needs two pools, cephfs-metadata and cephfs-data, for file metadata and file data respectively
ceph osd pool create cephfs-metadata 16 16

ceph osd pool create cephfs-data 32 32

ceph fs new cephfs cephfs-metadata cephfs-data

ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"

# ceph -s should show three MDS daemons, one active and two standby
ceph -s
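The mount command below assumes a CephFS client user named fsclient whose secret has been saved to /etc/ceph/fsclient.key on the client machine; neither exists yet at this point. A sketch of creating them (the user name and key path simply match what the mount options reference):

# Confirm the filesystem is active
ceph fs status cephfs

# Create a client user authorized for the cephfs filesystem (inside cephadm shell)
ceph fs authorize cephfs client.fsclient / rw

# Save the bare secret where the mount command expects it (on the client)
ceph auth get-key client.fsclient > /etc/ceph/fsclient.key

# Create the mount point
mkdir -p /cloudssd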


mount -t ceph storage01:6789,storage02:6789,storage03:6789:/ /cloudssd/ -o name=fsclient,secretfile=/etc/ceph/fsclient.key

7. Deploy RGW

  • Deploy the RADOS Gateway for object storage
ceph orch apply rgw myorg cn-east-1 --placement="3 storage01 storage02 storage03"

# rgw is the last service to come up and may take a while to appear
ceph orch ls
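Once ceph orch ls shows the rgw service running, a basic smoke test is possible. cephadm's RGW listens on port 80 by default, and radosgw-admin is the standard tool for creating S3 users (testuser here is just an example name):

# Create an S3 test user; the output includes the access and secret keys
radosgw-admin user create --uid=testuser --display-name="Test User"

# An anonymous request to the gateway should return an XML ListAllMyBucketsResult
curl http://storage01:80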