OpenStack Additional Components

Published 2023-03-31 15:29:02  Author: A57

Swift deployment

Controller node

  • Object Storage service

  • Install packages

# centos
yum install openstack-swift-proxy python-swiftclient python-keystoneclient python-keystonemiddleware memcached -y

# ubuntu
apt install swift swift-proxy python3-swiftclient  python3-keystoneclient python3-keystonemiddleware  memcached -y
  • Create the user, role assignment, service, and endpoints

openstack user create --domain default --password swift swift
openstack role add --project service --user swift admin
openstack service create --name swift --description "OpenStack Object Storage" object-store
openstack endpoint create --region RegionOne object-store public http://controller:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store internal http://controller:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store admin http://controller:8080/v1
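  • Verify the endpoints (an optional quick check; assumes the admin credentials are already sourced)

# Should list the three object-store endpoints created above
openstack endpoint list --service object-store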
  • Configure the proxy server configuration file

vim /etc/swift/proxy-server.conf
[DEFAULT]
bind_port = 8080
user = swift
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = True
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin,user
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = swift
password = swift
delay_auth_decision = True
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
user_test5_tester5 = testing5 service
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
memcache_servers = controller:11211
[filter:ratelimit]
use = egg:swift#ratelimit
[filter:domain_remap]
use = egg:swift#domain_remap
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:cname_lookup]
use = egg:swift#cname_lookup
[filter:staticweb]
use = egg:swift#staticweb
[filter:tempurl]
use = egg:swift#tempurl
[filter:formpost]
use = egg:swift#formpost
[filter:name_check]
use = egg:swift#name_check
[filter:list-endpoints]
use = egg:swift#list_endpoints
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:bulk]
use = egg:swift#bulk
[filter:slo]
use = egg:swift#slo
[filter:dlo]
use = egg:swift#dlo
[filter:container-quotas]
use = egg:swift#container_quotas
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:container_sync]
use = egg:swift#container_sync
[filter:xprofile]
use = egg:swift#xprofile
[filter:versioned_writes]
use = egg:swift#versioned_writes
[filter:copy]
use = egg:swift#copy
[filter:keymaster]
use = egg:swift#keymaster
encryption_root_secret = changeme
[filter:kms_keymaster]
use = egg:swift#kms_keymaster
[filter:encryption]
use = egg:swift#encryption
[filter:listing_formats]
use = egg:swift#listing_formats
[filter:symlink]
use = egg:swift#symlink
  • Create the account ring

  • The account server uses the account ring to maintain lists of containers.

cd /etc/swift/
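
# The create arguments below are: <part_power> <replicas> <min_part_hours>.
# The add commands reference two shell variables that must be set first;
# hypothetical values, substitute the storage node's storage-network IP
# and the device name reserved for Swift:
STORAGE_LOCAL_NET_IP=10.0.0.11
OBJECT_DISK=sdc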

swift-ring-builder account.builder create 18 1 1
swift-ring-builder account.builder add --region 1 --zone 1 --ip $STORAGE_LOCAL_NET_IP --port 6202 --device $OBJECT_DISK --weight 100
swift-ring-builder account.builder
swift-ring-builder account.builder rebalance
  • Create the container ring

  • The container server uses the container ring to maintain lists of objects. However, it does not track object locations.

cd /etc/swift/

swift-ring-builder container.builder create 10 1 1
swift-ring-builder container.builder add --region 1 --zone 1 --ip $STORAGE_LOCAL_NET_IP --port 6201 --device $OBJECT_DISK --weight 100
swift-ring-builder container.builder
swift-ring-builder container.builder rebalance
  • Create the object ring

  • The object server uses the object ring to maintain lists of object locations on local devices.

cd /etc/swift/

swift-ring-builder object.builder create 10 1 1
swift-ring-builder object.builder add --region 1 --zone 1 --ip $STORAGE_LOCAL_NET_IP --port 6200 --device $OBJECT_DISK --weight 100
swift-ring-builder object.builder
swift-ring-builder object.builder rebalance
  • Configure the swift.conf file

vim /etc/swift/swift.conf
[swift-hash]
swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
[storage-policy:0]
name = Policy-0
default = yes
aliases = yellow, orange
[swift-constraints]
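The swift_hash_path_suffix and swift_hash_path_prefix values must be unique to the cluster and identical on every node; one way to generate them, as a sketch:

# Generate two random values once, then use the same values in
# /etc/swift/swift.conf on the controller and on all storage nodes
openssl rand -hex 16
openssl rand -hex 16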
  • Set ownership and start the services

chown -R root:swift /etc/swift

# centos
systemctl enable --now openstack-swift-proxy.service memcached.service

# ubuntu
systemctl restart swift-proxy.service memcached.service

Compute node

  • Install packages

# centos
yum install xfsprogs rsync openstack-swift-account openstack-swift-container openstack-swift-object -y

# ubuntu
apt install xfsprogs rsync swift-account swift-container swift-object -y
  • Format the disk

mkfs.xfs -i size=1024 -f /dev/sdc
  • Configure mounting at boot

mkdir -p /swift/node/sdc

echo "/dev/sdc /swift/node/sdc xfs noatime 0 2" >> /etc/fstab

mount /dev/sdc /swift/node/sdc
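A quick check that the new XFS filesystem is mounted:
df -h /swift/node/sdc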
  • Distribute the ring files

scp controller:/etc/swift/*.ring.gz /etc/swift/
  • Configure the rsyncd configuration file

vim /etc/rsyncd.conf
pid file = /var/run/rsyncd.pid
log file = /var/log/rsyncd.log
uid = swift
gid = swift
# use the storage network IP instead if replication must cross nodes
address = 127.0.0.1
[account]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/account.lock
[container]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/container.lock
[object]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/object.lock
[swift_server]
path            = /etc/swift
read only       = true
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 5
lock file =     /var/lock/swift_server.lock
  • Start rsync

# centos
systemctl enable --now rsyncd.service

# ubuntu
systemctl enable --now rsync.service
  • Configure the account-server configuration file

vim /etc/swift/account-server.conf
[DEFAULT]
bind_port = 6202
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon account-server
[app:account-server]
use = egg:swift#account
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
[account-replicator]
[account-auditor]
[account-reaper]
[filter:xprofile]
use = egg:swift#xprofile
  • Configure the container-server configuration file

vim /etc/swift/container-server.conf
[DEFAULT]
bind_port = 6201
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon container-server
[app:container-server]
use = egg:swift#container
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
[container-replicator]
[container-updater]
[container-auditor]
[container-sync]
[filter:xprofile]
use = egg:swift#xprofile
  • Configure the object-server configuration file

vim /etc/swift/object-server.conf
[DEFAULT]
bind_port = 6200
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon object-server
[app:object-server]
use = egg:swift#object
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock
[object-replicator]
[object-reconstructor]
[object-updater]
[object-auditor]
[filter:xprofile]
use = egg:swift#xprofile
  • Configure the swift.conf file (same content as on the controller node)

vim /etc/swift/swift.conf
[swift-hash]
swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
[storage-policy:0]
name = Policy-0
default = yes
aliases = yellow, orange
[swift-constraints]
  • Set ownership and create the required directories

chown -R swift:swift /swift/node
mkdir -p /var/cache/swift
chown -R root:swift /var/cache/swift
chmod -R 775 /var/cache/swift
chown -R root:swift /etc/swift
  • Start the services

# centos
systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service
systemctl restart openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service
systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service
systemctl restart openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service
systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service
systemctl restart openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service

# ubuntu
systemctl restart swift-account swift-account-auditor swift-account-reaper swift-account-replicator

systemctl restart swift-container swift-container-auditor swift-container-replicator swift-container-updater

systemctl restart swift-object swift-object-auditor swift-object-replicator swift-object-updater
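
  • Verify Object Storage (a minimal end-to-end check; assumes the admin credentials are sourced)

# Show account statistics through the proxy
swift stat

# Upload and list a test object
openstack container create test-container
openstack object create test-container /etc/hosts
openstack object list test-container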

Trove deployment

  • Create a Cinder volume type

#create cinder_volume_type volumev2
cinder type-create volumev2
#create keypair trove-keypair
openstack keypair create trove-keypair
  • Set variables

project_id_admin=`openstack project list|grep admin |awk '{print $2}'`
default_security_groups=`openstack security group list|grep $project_id_admin |awk '{print $2}'`
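A quick sanity check; the security group ID captured here is the value that management_security_groups expects in the Trove configuration files below:
echo $project_id_admin
echo $default_security_groups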
  • Create the database

mysql -uroot -e "CREATE DATABASE trove;"
mysql -uroot -e "GRANT ALL PRIVILEGES ON trove.* TO 'trove'@'%' IDENTIFIED BY 'trove123';"
  • Create the user, service, and endpoints

openstack user create --domain default --password trove trove
openstack role add --project service --user trove admin
openstack service create --name trove --description "Database" database
openstack endpoint create --region RegionOne database public http://controller:8779/v1.0/%\(tenant_id\)s 
openstack endpoint create --region RegionOne database internal http://controller:8779/v1.0/%\(tenant_id\)s
openstack endpoint create --region RegionOne database admin http://controller:8779/v1.0/%\(tenant_id\)s
  • Install packages

yum install openstack-trove openstack-trove-ui python-troveclient -y
  • Configuration files

vim /etc/trove/trove.conf
[DEFAULT]
log_dir = /var/log/trove
trove_auth_url = http://controller:5000/v3
nova_compute_url = http://controller:8774/v2.1
cinder_url = http://controller:8776/v2
swift_url = http://controller:8080/v1/AUTH_
cinder_service_type = volumev2
rpc_backend = rabbit
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
api_paste_config = /etc/trove/api-paste.ini
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 000000
nova_proxy_admin_tenant_name = admin
taskmanager_manager = trove.taskmanager.manager.Manager
use_nova_server_config_drive = True
network_driver = trove.network.neutron.NeutronDriver
# use the security group ID captured in $default_security_groups above
management_security_groups = a312c1fd-f3e9-4b37-8ddd-78c3625e712d
nova_keypair = trove-keypair
network_label_regex = .*
[database]
connection = mysql+pymysql://trove:trove123@controller/trove
idle_timeout = 3600
[profiler]
[ssl]
[oslo_messaging_notifications]
[mysql]
[redis]
[cassandra]
[couchbase]
[mongodb]
[vertica]
[cors]
[cors.subdomain]
[oslo_middleware]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = trove
password = trove
vim /etc/trove/trove-taskmanager.conf
[DEFAULT]
log_dir = /var/log/trove
log_file = trove-taskmanager.log
trove_auth_url = http://controller:5000/v3
nova_compute_url = http://controller:8774/v2.1
notifier_queue_hostname = controller
rpc_backend = rabbit
transport_url = rabbit://openstack:000000@controller
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 000000
nova_proxy_admin_tenant_name = admin
taskmanager_manager = trove.taskmanager.manager.Manager
notification_driver = messagingv2
network_driver = trove.network.neutron.NeutronDriver
management_security_groups = a312c1fd-f3e9-4b37-8ddd-78c3625e712d
nova_keypair = trove-keypair
network_label_regex = .*
guest_config = /etc/trove/trove-guestagent.conf
guest_info = guest_info.conf
injected_config_location = /etc/trove/conf.d
cloudinit_location = /etc/trove/cloudinit
[database]
connection = mysql+pymysql://trove:trove123@controller/trove
idle_timeout = 3600
[profiler]
[ssl]
[oslo_messaging_notifications]
[mysql]
[redis]
[cassandra]
[couchbase]
[mongodb]
[vertica]
[cors]
[cors.subdomain]
[oslo_middleware]
vim /etc/trove/trove-conductor.conf
[DEFAULT]
log_dir = /var/log/trove
log_file = trove-conductor.log
trove_auth_url = http://controller:5000/v3
notifier_queue_hostname = controller
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 000000
nova_proxy_admin_tenant_name = admin
rpc_backend = rabbit
transport_url = rabbit://openstack:000000@controller
[database]
connection = mysql+pymysql://trove:trove123@controller/trove
[profiler]
[ssl]
[oslo_messaging_notifications]
[mysql]
[redis]
[cassandra]
[couchbase]
[mongodb]
[vertica]
[cors]
[cors.subdomain]
[oslo_middleware]
vim /etc/trove/trove-guestagent.conf
[DEFAULT]
rpc_backend = rabbit
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 000000
trove_auth_url = http://10.0.0.10:5000/v3
swift_url = http://10.0.0.10:8080/v1/AUTH_
os_region_name = RegionOne
swift_service_type = object-store
log_file = trove-guestagent.log
transport_url = rabbit://openstack:000000@10.0.0.10
[oslo_messaging_rabbit]
rabbit_host = 10.0.0.10
rabbit_userid = openstack
rabbit_password = 000000
rabbit_port = 5672
[profiler]
[ssl]
[oslo_messaging_notifications]
[mysql]
[redis]
[cassandra]
[couchbase]
[mongodb]
[vertica]
[cors]
[cors.subdomain]
[oslo_middleware]
  • Populate the database

su -s /bin/sh -c "trove-manage db_sync" trove
  • Create an instance

glance image-create --name "mysql-5.6" --disk-format qcow2  --container-format bare --progress --file Mysql-5.6.qcow2
trove-manage datastore_update mysql ''
Glance_Image_ID=`glance image-list | awk '/ mysql-5.6 / { print $2 }'`
trove-manage datastore_version_update mysql mysql-5.6 mysql $Glance_Image_ID '' 1
openstack database instance create mysql-1 $flavor_id  --nic net-id=NETID  --size 1 --databases myDB --users user:password --datastore_version mysql-5.6 --datastore mysql 
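
  • Verify (assumes the python-troveclient plugin installed above)

# List the registered datastores and the new database instance
openstack datastore list
openstack database instance list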

Heat deployment

Controller node

  • Create the database and grant privileges
CREATE DATABASE heat;

GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY 'heat123';
  • Create the user
openstack user create --domain default --password heat heat
  • Add the admin role to the heat user
openstack role add --project service --user heat admin
  • Create the heat and heat-cfn service entities
openstack service create --name heat --description "Orchestration" orchestration

openstack service create --name heat-cfn --description "Orchestration"  cloudformation
  • Create the Orchestration service API endpoints
openstack endpoint create --region RegionOne orchestration public http://controller:8004/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne orchestration internal http://controller:8004/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne orchestration admin http://controller:8004/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne cloudformation public http://controller:8000/v1

openstack endpoint create --region RegionOne cloudformation internal http://controller:8000/v1

openstack endpoint create --region RegionOne cloudformation admin http://controller:8000/v1
  • Create the heat domain that contains projects and users for stacks
openstack domain create --description "Stack projects and users" heat
  • Create the heat_domain_admin user to manage projects and users in the heat domain
openstack user create --domain heat --password heat_domain_admin heat_domain_admin
  • Add the admin role to the heat_domain_admin user in the heat domain so it can manage stacks
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
  • Install packages
yum install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine -y
  • Edit the configuration file
cp /etc/heat/heat.conf{,.bak}

grep -Ev "^$|#" /etc/heat/heat.conf.bak > /etc/heat/heat.conf


# Full configuration
vim /etc/heat/heat.conf
[DEFAULT]
heat_metadata_server_url = http://controller:8000
heat_waitcondition_server_url = http://controller:8000/v1/waitcondition
stack_domain_admin = heat_domain_admin
stack_domain_admin_password = heat_domain_admin
stack_user_domain_name = heat
[auth_password]
[clients]
[clients_aodh]
[clients_barbican]
[clients_cinder]
[clients_designate]
[clients_glance]
[clients_heat]
[clients_keystone]
auth_uri = http://controller:5000
[clients_magnum]
[clients_manila]
[clients_mistral]
[clients_monasca]
[clients_neutron]
[clients_nova]
[clients_octavia]
[clients_sahara]
[clients_senlin]
[clients_swift]
[clients_trove]
[clients_zaqar]
[cors]
[database]
connection = mysql+pymysql://heat:heat123@controller/heat
[ec2authtoken]
[eventlet_opts]
[healthcheck]
[heat_api]
[heat_api_cfn]
[heat_api_cloudwatch]
[noauth]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[profiler]
[revision]
[ssl]
[trustee]
auth_type = password
auth_url = http://controller:5000
username = heat
password = heat
user_domain_name = default
[volumes]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = heat
password = heat
  • Populate the database
su -s /bin/sh -c "heat-manage db_sync" heat
  • Start the services
systemctl enable --now openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
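  • Verify the services (each heat-engine worker should appear with status up)
openstack orchestration service list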
  • Use Heat to launch an instance
vim demo-template.yml
heat_template_version: 2015-10-15
description: Launch a basic instance with the CirrOS image using the
             C1-512MB-1G flavor and one network.

parameters:
  NetID:
    type: string
    description: Network ID to use for the instance.

resources:
  server:
    type: OS::Nova::Server
    properties:
      image: cirros04
      flavor: C1-512MB-1G
      networks:
      - network: { get_param: NetID }

outputs:
  instance_name:
    description: Name of the instance.
    value: { get_attr: [ server, name ] }
  instance_ip:
    description: IP address of the instance.
    value: { get_attr: [ server, first_address ] }
  • Assign the network ID to a variable
openstack network list

NET_ID=0c5ff70b-ab63-486c-8e37-4175d620dac3
  • Create the stack
openstack stack create -t demo-template.yml --parameter "NetID=$NET_ID" stack
  • List stacks
openstack stack list
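  • Read back the template outputs once the stack reaches CREATE_COMPLETE
# Shows the instance_name and instance_ip outputs defined in the template
openstack stack output show stack --all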
  • Delete the stack
openstack stack delete --yes stack

Ceilometer deployment

Controller node

  • Create the database and grant privileges
CREATE DATABASE gnocchi;

GRANT ALL PRIVILEGES ON gnocchi.* TO 'gnocchi'@'%' IDENTIFIED BY 'gnocchi123';
  • Create the user
openstack user create --domain default --password ceilometer ceilometer
  • Add the admin role to the ceilometer user
openstack role add --project service --user ceilometer admin
  • Register the Gnocchi sub-service in Keystone
openstack user create --domain default --password gnocchi gnocchi
  • Create the service entity
openstack service create --name gnocchi --description "Metric Service" metric
  • Add the admin role to the gnocchi user
openstack role add --project service --user gnocchi admin
  • Create the service API endpoints
openstack endpoint create --region RegionOne metric public http://controller:8041

openstack endpoint create --region RegionOne metric internal http://controller:8041

openstack endpoint create --region RegionOne metric admin http://controller:8041
  • Install packages
yum install openstack-gnocchi-api openstack-gnocchi-metricd python2-gnocchiclient python-devel uwsgi uwsgi-plugin-common uwsgi-plugin-python  redis openstack-ceilometer-notification openstack-ceilometer-central python2-ceilometerclient python-ceilometermiddleware -y
  • Edit the configuration file
cp /etc/gnocchi/gnocchi.conf{,.bak}

grep -Ev "^$|#" /etc/gnocchi/gnocchi.conf.bak > /etc/gnocchi/gnocchi.conf


# Full configuration
vim /etc/gnocchi/gnocchi.conf
[DEFAULT]
log_dir = /var/log/gnocchi
coordination_url = redis://controller:6379
debug = true
verbose = true
parallel_operations = 4
[api]
auth_mode = keystone
host = 0.0.0.0
port = 8041
uwsgi_mode = http-socket
max_limit = 1000
[archive_policy]
[cors]
[healthcheck]
[incoming]
[indexer]
url = mysql+pymysql://gnocchi:gnocchi123@controller/gnocchi
[metricd]
[oslo_middleware]
[oslo_policy]
[statsd]
[storage]
coordination_url = redis://controller:6379
file_basepath = /var/lib/gnocchi
driver = file
[keystone_authtoken]
auth_type = password
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
project_domain_name = default
user_domain_name = default
project_name = service
username = gnocchi
password = gnocchi
interface = internalURL
region_name = RegionOne
  • Install and configure Redis
yum install -y redis

# Edit the configuration file (line numbers are approximate)
vim /etc/redis.conf
bind 0.0.0.0          # around line 61
protected-mode no     # around line 80
daemonize yes         # around line 128

# Restart the service
systemctl restart redis
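
# Quick check that Redis answers (should print PONG)
redis-cli -h controller ping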
  • Initialize Gnocchi
gnocchi-upgrade
  • Set directory permissions
chmod -R 777 /var/lib/gnocchi
  • Start the services
systemctl enable --now openstack-gnocchi-api.service openstack-gnocchi-metricd.service
  • Install the Ceilometer services
yum install openstack-ceilometer-notification openstack-ceilometer-central -y
  • Edit the pipeline file
vim /etc/ceilometer/pipeline.yaml
---
sources:
    - name: meter_source
      meters:
          - "*"
      sinks:
          - meter_sink
    - name: cpu_source
      interval: 600
      meters:
          - "cpu"
      sinks:
          - cpu_sink
    - name: disk_source
      interval: 600
      meters:
          - "disk.read.bytes"
          - "disk.read.requests"
          - "disk.write.bytes"
          - "disk.write.requests"
          - "disk.device.read.bytes"
          - "disk.device.read.requests"
          - "disk.device.write.bytes"
          - "disk.device.write.requests"
      sinks:
          - disk_sink
    - name: network_source
      interval: 600
      meters:
          - "network.incoming.bytes"
          - "network.incoming.packets"
          - "network.outgoing.bytes"
          - "network.outgoing.packets"
      sinks:
          - network_sink
sinks:
    - name: meter_sink
      publishers:
          - gnocchi://?filter_project=service&archive_policy=low
    - name: cpu_sink
      publishers:
          - gnocchi://?filter_project=service&archive_policy=low
    - name: disk_sink
      publishers:
          - gnocchi://?filter_project=service&archive_policy=low
    - name: network_sink
      publishers:
          - gnocchi://?filter_project=service&archive_policy=low
  • Edit the main configuration file
cp /etc/ceilometer/ceilometer.conf{,.bak}

grep -Ev "^$|#" /etc/ceilometer/ceilometer.conf.bak > /etc/ceilometer/ceilometer.conf

# Full configuration
vim /etc/ceilometer/ceilometer.conf
[DEFAULT]
transport_url = rabbit://openstack:000000@controller
[compute]
[coordination]
[event]
[hardware]
[ipmi]
[meter]
[notification]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[polling]
[publisher]
[publisher_notifier]
[rgw_admin_credentials]
[rgw_client]
[service_credentials]
auth_type = password
auth_url = http://controller:5000
memcached_servers = controller:11211
project_domain_name = default
user_domain_name = default
project_name = service
username = ceilometer
password = ceilometer
[service_types]
[vmware]
[xenapi]
[api]
auth_mode = keystone
[dispatcher_gnocchi]
filter_service_activity = False
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = ceilometer
password = ceilometer
  • Create the Ceilometer resources in Gnocchi. Gnocchi should be running at this stage.
ceilometer-upgrade
  • Start the Ceilometer services
systemctl enable --now openstack-ceilometer-notification.service openstack-ceilometer-central.service
  • Configure Glance
vim /etc/glance/glance-api.conf
[oslo_messaging_notifications]
driver = messagingv2

# Restart to apply
systemctl restart openstack-glance-api.service
  • Configure Cinder
vim /etc/cinder/cinder.conf
[oslo_messaging_notifications]
driver = messagingv2

# Restart to apply
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
  • Configure Heat
vim /etc/heat/heat.conf
[oslo_messaging_notifications]
driver = messagingv2

# Restart to apply
systemctl restart openstack-heat-api.service openstack-heat-engine.service
  • Configure Neutron
vim /etc/neutron/neutron.conf
[oslo_messaging_notifications]
driver = messagingv2

# Restart to apply
systemctl restart neutron-server.service 
  • Configure Swift
# Create the ResellerAdmin role
openstack role create ResellerAdmin


# Grant the role to the ceilometer user
openstack role add --project service --user ceilometer ResellerAdmin


# Edit the configuration file
vim /etc/swift/proxy-server.conf
[filter:keystoneauth]
operator_roles = admin, user, ResellerAdmin

[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging ceilometer proxy-server

[filter:ceilometer]
paste.filter_factory = ceilometermiddleware.swift:filter_factory
control_exchange = swift
url = rabbit://openstack:000000@controller:5672/
driver = messagingv2
topic = notifications
log_level = WARN


# Restart to apply
systemctl restart openstack-swift-proxy.service

Compute node

  • Install packages
yum install openstack-ceilometer-compute -y
  • Edit the configuration file
# Back up the file
cp /etc/ceilometer/ceilometer.conf{,.bak}

# Strip comments and blank lines
grep -Ev "^$|#" /etc/ceilometer/ceilometer.conf.bak > /etc/ceilometer/ceilometer.conf

# Full configuration
vim /etc/ceilometer/ceilometer.conf
[DEFAULT]
transport_url = rabbit://openstack:000000@controller
[compute]
[coordination]
[event]
[hardware]
[ipmi]
[meter]
[notification]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[polling]
[publisher]
[publisher_notifier]
[rgw_admin_credentials]
[rgw_client]
[service_credentials]
auth_url = http://controller:5000
memcached_servers = controller:11211
project_domain_name = default
user_domain_name = default
project_name = service
auth_type = password
username = ceilometer
password = ceilometer
[service_types]
[vmware]
[xenapi]
  • Configure the nova.conf file
vim /etc/nova/nova.conf
[DEFAULT]
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state

[oslo_messaging_notifications]
driver = messagingv2
  • Start the Ceilometer service
systemctl enable --now openstack-ceilometer-compute.service
  • Restart the Nova service
systemctl restart openstack-nova-compute.service
  • Verify
gnocchi status

# If you hit: AttributeError: _Environ instance has no attribute 'set',
# change os.environ.set("OS_AUTH_TYPE","password") to:
os.environ["OS_AUTH_TYPE"]="password"


# Upload an image to test
openstack image create cirros04_v2  --disk-format qcow2 --file cirros-0.4.0-x86_64-disk.img 


gnocchi resource list  --type image

# Use the ID generated in the previous step
gnocchi resource show $ID
  • Check image download metering
IMAGE_ID=$(glance image-list | grep 'cirros' | awk '{ print $2 }')

glance image-download $IMAGE_ID > /tmp/cirros.img

gnocchi resource show $IMAGE_ID
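
If metering works end to end, the download also shows up as measures on the image resource. A sketch, assuming the image.download metric name used by recent Gnocchi resource definitions:

# Metric names can differ between releases; adjust if necessary
gnocchi measures show image.download --resource-id $IMAGE_ID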