Deploying a Ceph Cluster on CentOS 8 and Integrating It with OpenStack (Ussuri)
Introduction
Linux keeps pushing into scalable computing, and scalable storage in particular. Ceph is an impressive recent addition to the Linux file-system lineup: a distributed file system that adds replication and fault tolerance while maintaining POSIX compatibility.
The Ceph ecosystem can be divided into four parts:
1. Clients: the clients (data users)
2. cmds: the metadata server cluster (caches and synchronizes distributed metadata)
3. cosd: the object storage cluster (stores data and metadata as objects and performs other key functions)
4. cmon: the cluster monitors (carry out the monitoring functions)
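In a running cluster these components correspond to daemons you can inspect directly. A quick orientation (commands only; output varies by deployment):
#mon (cmon) and osd (cosd) daemons appear in the services section of:
ceph -s
#one row per daemon per host:
ceph orch ps
#cmds (the mds) only runs once a CephFS file system is created; this guide uses RBD pools only, so no mds is deployed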
Prerequisites
Prepare two CentOS 8 virtual machines: configure the IP addresses and hostnames listed in the table below, synchronize the system time, disable the firewall and SELinux, map the IP addresses to the hostnames, and attach an extra disk to each VM (a command sketch follows the table).
ip | hostname
---|---
192.168.29.148 | controller
192.168.29.149 | computer
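The preparation steps above map to a handful of commands. A minimal sketch for the controller node (run the same on computer with its own hostname; the chrony package for time sync is an assumption, substitute your usual NTP setup):
[root@controller ~]# hostnamectl set-hostname controller
[root@controller ~]# systemctl disable --now firewalld
[root@controller ~]# setenforce 0
[root@controller ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[root@controller ~]# cat >> /etc/hosts <<EOF
192.168.29.148 controller
192.168.29.149 computer
EOF
[root@controller ~]# yum install chrony -y && systemctl enable --now chronyd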
Setting up OpenStack itself is covered here: https://blog.51cto.com/14832653/2542863
Note: if an OpenStack cluster is already running, delete its instances, images, and volumes first, as sketched below.
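The usual CLI flow for that cleanup looks like this (a sketch; admin-openrc and the resource names are placeholders):
[root@controller ~]# source admin-openrc
[root@controller ~]# openstack server delete <instance>
[root@controller ~]# openstack volume delete <volume>
[root@controller ~]# openstack image delete <image>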
Install the Ceph repository
[root@controller ~]# yum install centos-release-ceph-octopus.noarch -y
[root@computer ~]# yum install centos-release-ceph-octopus.noarch -y
Install the Ceph components
[root@controller ~]# yum install cephadm -y
[root@computer ~]# yum install ceph -y
Install libvirt on the computer node
[root@computer ~]# yum install libvirt -y
Deploy the Ceph cluster
Create the cluster
[root@controller ~]# mkdir -p /etc/ceph
[root@controller ~]# cd /etc/ceph/
[root@controller ceph]# cephadm bootstrap --mon-ip 192.168.29.148
[root@controller ceph]# ceph status
[root@controller ceph]# cephadm install ceph-common
[root@controller ceph]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@computer
Modify the configuration
[root@controller ceph]# ceph config set mon public_network 192.168.29.0/24
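A quick check, not in the original flow, that the option was stored:
[root@controller ceph]# ceph config get mon public_network
192.168.29.0/24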
Add a host
[root@controller ceph]# ceph orch host add computer
[root@controller ceph]# ceph orch host ls
Initialize the cluster monitors
[root@controller ceph]# ceph orch host label add controller mon
[root@controller ceph]# ceph orch host label add computer mon
[root@controller ceph]# ceph orch apply mon label:mon
[root@controller ceph]# ceph orch daemon add mon computer:192.168.29.149
Create the OSDs
[root@controller ceph]# ceph orch daemon add osd controller:/dev/nvme0n2
[root@controller ceph]# ceph orch daemon add osd computer:/dev/nvme0n3
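If the device names are uncertain, the orchestrator can list every host's candidate disks (a helper check, not in the original, worth running before the two commands above):
[root@controller ceph]# ceph orch device ls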
Check the cluster status
[root@controller ceph]# ceph -s
Check the cluster capacity
[root@controller ceph]# ceph df
Create the pools
[root@controller ceph]# ceph osd pool create volumes 64
[root@controller ceph]# ceph osd pool create vms 64
#Enable the rbd application on the pools
[root@controller ceph]# ceph osd pool application enable vms rbd
[root@controller ceph]# ceph osd pool application enable volumes rbd
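To verify that the application tag stuck on each pool (a quick check):
[root@controller ceph]# ceph osd pool application get volumes
[root@controller ceph]# ceph osd pool application get vms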
Check the mon, OSD, and pool status
[root@controller ceph]# ceph mon stat
[root@controller ceph]# ceph osd status
[root@controller ceph]# ceph osd lspools
List the RBD images in each pool
[root@controller ~]# rbd ls vms
[root@controller ~]# rbd ls volumes
Integrate the Ceph cluster with OpenStack
Create the cinder user and set its permissions
[root@controller ceph]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms'
Create the keyring
[root@controller ceph]# ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
#Send the key to computer
[root@controller ~]# ceph auth get-key client.cinder > client.cinder.key
[root@controller ~]# scp client.cinder.key computer:/root/
#Fix the ownership
[root@controller ceph]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
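A quick check that the caps landed as intended:
[root@controller ceph]# ceph auth get client.cinder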
Define the libvirt secret
#Generate a UUID on computer
[root@computer ~]# uuidgen
1fad1f90-63fb-4c15-bfc3-366c6559c1fe
#Create the secret file
[root@computer ~]# vi secret.xml
<secret ephemeral='no' private='no'>
<uuid>1fad1f90-63fb-4c15-bfc3-366c6559c1fe</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
#Define the secret
virsh secret-define --file secret.xml
#Set the secret value and clean up
virsh secret-set-value --secret 1fad1f90-63fb-4c15-bfc3-366c6559c1fe --base64 $(cat client.cinder.key) && rm -rf client.cinder.key secret.xml
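To verify the secret is registered and carries the key (the UUID is the one generated above):
virsh secret-list
virsh secret-get-value --secret 1fad1f90-63fb-4c15-bfc3-366c6559c1fe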
Integrate the Cinder module
Edit the configuration file
[root@controller ~]# vi /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.29.148
enabled_backends = ceph
default_volume_type = ceph
glance_api_version = 2
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
#the UUID generated on computer
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe
Sync the database
#If the database already exists, drop it, then recreate it and sync again
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
Restart the services
[root@controller ~]# systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
Create the ceph volume type and bind it to the backend
[root@controller ~]# source admin-openrc
[root@controller ~]# cinder type-create ceph
[root@controller ~]# cinder type-key ceph set volume_backend_name=ceph
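As a verification step (not in the original), the ceph backend should report as up and the type should carry the backend mapping:
[root@controller ~]# openstack volume service list
[root@controller ~]# openstack volume type show ceph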
Integrate the nova-compute module
Edit the configuration file on the computer node
[root@computer ~]# vi /etc/nova/nova.conf
[libvirt]
virt_type = qemu
inject_password = true
inject_partition = -1
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe
disk_cachemodes = "network=writeback"
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard = unmap
[root@computer ~]# vi /etc/ceph/ceph.conf
[client]
rbd cache=true
rbd cache writethrough until flush=true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
Create the socket and log directories
[root@computer ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@computer ~]# chmod -R 777 /var/run/ceph/guests/ /var/log/qemu/
Distribute the keyring from controller
[root@controller ~]# cd /etc/ceph
[root@controller ceph]# scp ceph.client.cinder.keyring root@computer:/etc/ceph
Restart the services
[root@computer ~]# systemctl stop libvirtd openstack-nova-compute
[root@computer ~]# systemctl start libvirtd openstack-nova-compute
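Finally, a smoke test of the whole integration (a sketch; the volume name test-vol is a placeholder):
[root@controller ~]# source admin-openrc
[root@controller ~]# openstack volume create --size 1 --type ceph test-vol
[root@controller ~]# rbd ls volumes
#the new volume should appear as volume-<id>; booting an instance from it exercises the nova-compute side as well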