OpenStack Train版-16.對接Ceph儲存
阿新 • 發佈:2022-04-15
1.叢集建立OpenStack所需的儲存池和使用者(Ceph Mon節點)
建立 OpenStack所需的儲存池
# Create the RBD pools used by Cinder (volumes), Glance (images) and
# Nova (vms); the trailing number is the placement-group (PG) count.
ceph osd pool create volumes 128
ceph osd pool create images 32
ceph osd pool create vms 128
同步配置檔案
# Push ceph.conf and the admin keyring to every OpenStack node,
# overwriting any stale copies.
cd /etc/ceph
ceph-deploy --overwrite-conf admin controller compute01 compute02
建立使用者並授權
# Create least-privilege CephX users:
#  - client.cinder: read/write on volumes and vms, read-only (rx) on images
#  - client.glance: read/write on images
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
同步使用者祕鑰並修改屬主
# Copy each service's keyring to the node(s) that need it and chown it
# so the service account can read it.
ceph auth get-or-create client.glance | ssh controller tee /etc/ceph/ceph.client.glance.keyring
ssh controller chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh controller tee /etc/ceph/ceph.client.cinder.keyring
ssh controller chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder | ssh compute01 tee /etc/ceph/ceph.client.cinder.keyring
ssh compute01 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder | ssh compute02 tee /etc/ceph/ceph.client.cinder.keyring
ssh compute02 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
2.各計算節點使用相同的UUID增加祕鑰到libvirt
生成UUID
UUID=$(uuidgen) echo $UUID 02aa5663-b0b5-453d-a0ce-1f24c61716c6
生成並同步secret.xml到所有計算節點(同步及增加祕鑰部分略)
# Write the libvirt secret definition, binding it to the shared UUID.
cd /etc/ceph
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${UUID}</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
新增金鑰到libvirt
# Register the secret with libvirt, then store the client.cinder key
# (extracted from the keyring) as its value.
virsh secret-define --file secret.xml
# Output: Secret 02aa5663-b0b5-453d-a0ce-1f24c61716c6 created
virsh secret-set-value --secret "${UUID}" --base64 "$(awk '/key/ {print $3}' /etc/ceph/ceph.client.cinder.keyring)"
# Output: Secret value set
檢視新增後的金鑰key和value
# List registered libvirt secrets to confirm the UUID was added.
virsh secret-list
# Example output:
#  UUID                                   Usage
# --------------------------------------------------------------------------------
#  02aa5663-b0b5-453d-a0ce-1f24c61716c6   ceph client.cinder secret
檢視value
# Print the stored secret value (the base64 Ceph key) for verification.
virsh secret-get-value "$UUID"
3.配置Glance整合Ceph作為後端儲存並驗證(控制節點)
vim /etc/glance/glance-api.conf
[DEFAULT]
# Expose direct image URLs so Cinder/Nova can do RBD copy-on-write clones.
show_image_direct_url = True
[glance_store]
stores = rbd,file,http
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
#rbd_store_chunk_size = 8
[paste_deploy]
flavor = keystone
重啟glance-api服務
# Restart glance-api so the new RBD store settings take effect.
systemctl restart openstack-glance-api.service
4.配置Cinder整合Ceph作為後端儲存並驗證(控制節點和儲存節點)
vim /etc/cinder/cinder.conf
# Controller node: make the ceph backend the default volume type.
[DEFAULT]
default_volume_type = ceph
重啟cinder-api和cinder-scheduler服務
# Restart the API and scheduler so the new default volume type is picked up.
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
儲存節點一般會部署到控制節點上,根據實際環境而定
vim /etc/cinder/cinder.conf
# Storage node: enable the ceph (RBD) backend alongside the existing lvm one.
[DEFAULT]
enabled_backends = ceph,lvm
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
# Must match the libvirt secret UUID created when configuring Ceph.
rbd_secret_uuid = 02aa5663-b0b5-453d-a0ce-1f24c61716c6
重啟cinder-volume服務
# Restart cinder-volume so the [ceph] backend is loaded.
systemctl restart openstack-cinder-volume.service
5.配置Nova整合Ceph(計算節點)
vim /etc/nova/nova.conf
[DEFAULT]
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
[libvirt]
# NOTE: the original had "virt_type = kvm" listed twice; kept once.
virt_type = kvm
inject_partition=-2
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = cinder
# Must match the libvirt secret UUID created when configuring Ceph.
rbd_secret_uuid = 02aa5663-b0b5-453d-a0ce-1f24c61716c6
重啟nova-compute服務
# Restart nova-compute so instances boot from the RBD vms pool.
systemctl restart openstack-nova-compute.service