
Ceph Operations Series

1 Abstract

Based on CentOS 8.1 / 7.6, connecting to Ceph 14.2.15.

2 Environment Information

(1) Operating System Information

[root@cephclient ~]# cat /etc/centos-release
CentOS Linux release 8.1.1911 (Core)
[root@cephclient ~]# uname -a
Linux cephclient.novalocal 4.18.0-147.el8.x86_64 #1 SMP Wed Dec 4 21:51:45 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
[root@cephclient ~]#

3 Ceph Operations

(1) Ceph Block Storage

3.1.1 Ceph client configuration

3.1.1.1 Check whether the kernel supports rbd

[root@cephclient ~]# modprobe rbd
[root@cephclient ~]# echo $?
0
[root@cephclient ~]#
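As an extra sanity check (a sketch; output not captured on this host), you can confirm the rbd module is actually loaded:

lsmod | grep rbd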

3.1.1.2 Install the Ceph client

3.1.1.2.1 Configure the yum repository
[root@cephclient yum.repos.d]# vim ceph14centos8.repo
[root@cephclient yum.repos.d]# cat ceph14centos8.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
3.1.1.2.2 Download the packages to a local directory
yum -y install --downloadonly --downloaddir=/root/software/cephcentos8/ ceph
3.1.1.2.3 Install
yum -y install ceph
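After installation, a quick version check (a sketch; output not captured here) confirms the Nautilus client packages are in place:

ceph --version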
3.1.1.2.4 Create the Ceph block client user and authentication key (created on the server side)

For the server-side cluster setup, refer to the Ceph cluster deployment article.

Log in to the ceph-deploy node, switch to the cephadmin user, and change into the cephcluster directory:

[cephadmin@ceph001 ~]$ cd cephcluster/
[cephadmin@ceph001 cephcluster]$ pwd
/home/cephadmin/cephcluster
[cephadmin@ceph001 cephcluster]$


Generate the key and store it in ceph.client.rbd.keyring:

[cephadmin@ceph001 cephcluster]$ ceph auth get-or-create client.rbd mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=rbd' | tee ./ceph.client.rbd.keyring
[client.rbd]
        key = AQBXoMVfJqKiJxAAIOCDFiEJey0GcHu1RP61PA==
[cephadmin@ceph001 cephcluster]$ ll
total 144
-rw------- 1 cephadmin cephadmin    113 Nov 30 17:17 ceph.bootstrap-mds.keyring
-rw------- 1 cephadmin cephadmin    113 Nov 30 17:17 ceph.bootstrap-mgr.keyring
-rw------- 1 cephadmin cephadmin    113 Nov 30 17:17 ceph.bootstrap-osd.keyring
-rw------- 1 cephadmin cephadmin    113 Nov 30 17:17 ceph.bootstrap-rgw.keyring
-rw------- 1 cephadmin cephadmin    151 Nov 30 17:17 ceph.client.admin.keyring
-rw-rw-r-- 1 cephadmin cephadmin     61 Dec  1 09:45 ceph.client.rbd.keyring
-rw-rw-r-- 1 cephadmin cephadmin    313 Nov 30 17:09 ceph.conf
-rw-rw-r-- 1 cephadmin cephadmin    247 Nov 30 17:00 ceph.conf.bak.orig
-rw-rw-r-- 1 cephadmin cephadmin 108766 Nov 30 17:46 ceph-deploy-ceph.log
-rw------- 1 cephadmin cephadmin     73 Nov 30 16:50 ceph.mon.keyring
[cephadmin@ceph001 cephcluster]$
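The capabilities granted to the new user can be reviewed later from any admin node (a quick check; output omitted here):

ceph auth get client.rbd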

3.1.1.2.5 Ceph client configuration

Copy the ceph.client.rbd.keyring and ceph.conf configuration files from the server to the client:

[cephadmin@ceph001 cephcluster]$ scp ceph.client.rbd.keyring ceph.conf root@172.31.185.211:/etc/ceph/
The authenticity of host '172.31.185.211 (172.31.185.211)' can't be established.
ECDSA key fingerprint is SHA256:ES6ytBX1siYV4WMG2CF3/21VKaDd5y27lbWQggeqRWM.
ECDSA key fingerprint is MD5:08:8e:ce:cd:2c:b4:24:69:44:c9:e4:42:a7:bb:ee:3a.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '172.31.185.211' (ECDSA) to the list of known hosts.
root@172.31.185.211's password:
ceph.client.rbd.keyring                                                                                      100%   61    28.6KB/s   00:00
ceph.conf                                                     

Configure the client's /etc/hosts file:

[root@cephclient etc]# cp /etc/hosts /etc/hosts.bak.orig
[root@cephclient etc]# vim /etc/hosts
[root@cephclient etc]#

172.31.185.127 ceph001
172.31.185.198 ceph002
172.31.185.203 ceph003
Verify that the client configuration works:
[root@cephclient etc]# ceph -s --name client.rbd
  cluster:
    id:     69002794-cf45-49fa-8849-faadae48544f
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph001,ceph002,ceph003 (age 16h)
    mgr: ceph002(active, since 15h), standbys: ceph003, ceph001
    osd: 3 osds: 3 up (since 16h), 3 in (since 16h)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:

[root@cephclient etc]#


3.1.2 Create a block device and map it on the client

3.1.2.1 Create a block device

Log in to a Ceph node and first check whether an rbd pool already exists:

[cephadmin@ceph001 ~]$ ceph -s
  cluster:
    id:     69002794-cf45-49fa-8849-faadae48544f
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph001,ceph002,ceph003 (age 17h)
    mgr: ceph002(active, since 17h), standbys: ceph003, ceph001
    osd: 3 osds: 3 up (since 17h), 3 in (since 17h)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:

[cephadmin@ceph001 ~]$ ceph osd lspools
[cephadmin@ceph001 ~]$


No pool exists yet, so create one:

# 64 is the pg_num; how to size it for a production environment needs careful study
[cephadmin@ceph001 ~]$ ceph osd pool create rbd 64
pool 'rbd' created
[cephadmin@ceph001 ~]$

Choosing a pg_num value is mandatory, because it cannot be calculated automatically. A few commonly used values (total PGs for the cluster):
Fewer than 5 OSDs: set pg_num to 128
Between 5 and 10 OSDs: set pg_num to 512
Between 10 and 50 OSDs: set pg_num to 4096
More than 50 OSDs: you need to understand the trade-offs and how to calculate pg_num yourself
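A widely cited rule of thumb (as used by the Ceph pgcalc tool) is: total PGs ≈ (number of OSDs × 100) / replica count, rounded up to the next power of two. The sketch below applies it to a small cluster; num_osds and replicas are assumptions matching the 3-OSD cluster above, not values read from the cluster:

#!/bin/bash
# Rough pg_num estimate: (OSDs * 100) / replicas, rounded up to a power of two.
num_osds=3      # assumption: the 3-OSD cluster shown above
replicas=3      # assumption: default pool size of 3
target=$(( num_osds * 100 / replicas ))
pg_num=1
while [ "$pg_num" -lt "$target" ]; do
    pg_num=$(( pg_num * 2 ))
done
echo "suggested pg_num: $pg_num"    # prints 128 for 3 OSDs with 3 replicas

For this 3-OSD test cluster the formula suggests 128, in line with the "fewer than 5 OSDs" guideline above; the 64 used here is simply a smaller value that is fine for a test environment.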

Create the block device. This can be done either on a Ceph cluster node or on the client (since the client was granted access above).

Run the following on the client to create a 2 GB image:

[root@cephclient ~]# rbd create rbd1 --size 2048 --name client.rbd
# check that the image was created successfully
[root@cephclient ~]# rbd ls --name client.rbd
rbd1
[root@cephclient ~]#
# the image is also visible on the server side
[cephadmin@ceph001 ~]$  rbd ls
rbd1
[cephadmin@ceph001 ~]$

List the image in the rbd pool explicitly (-p specifies the pool name):

[root@cephclient ~]# rbd ls -p rbd --name client.rbd    # -p specifies the pool name
rbd1
[root@cephclient ~]#

View detailed information about the block device:

[root@cephclient ~]# rbd --image rbd1 info --name client.rbd
rbd image 'rbd1':
        size 2 GiB in 512 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 11256f6966f5
        block_name_prefix: rbd_data.11256f6966f5
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Tue Dec  1 11:30:53 2020
        access_timestamp: Tue Dec  1 11:30:53 2020
        modify_timestamp: Tue Dec  1 11:30:53 2020
[root@cephclient ~]#

3.1.2.2 Map the device on the client

Run on the client:

[root@cephclient ~]# rbd map --image rbd1 --name client.rbd
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable rbd1 object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
[root@cephclient ~]#

This fails because the kernel RBD client does not support some of the features enabled on the image.

There are several ways to resolve this; here the unsupported features are disabled dynamically:

[root@cephclient ~]# rbd feature disable rbd1 exclusive-lock object-map deep-flatten fast-diff -n client.rbd
[root@cephclient ~]#
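Besides disabling features dynamically, there are other ways to avoid the mismatch. The two sketches below were not used in this walkthrough; rbd2 is a hypothetical image name:

# Alternative 1 (sketch): create the image with only the kernel-friendly
# "layering" feature, so nothing has to be disabled afterwards.
rbd create rbd2 --size 2048 --image-feature layering --name client.rbd

# Alternative 2 (sketch): set a default feature mask on the client so that
# newly created images enable only layering (feature bit 1); add this line to
# the [global] section of /etc/ceph/ceph.conf before creating images:
#   rbd_default_features = 1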

Map again:

[root@cephclient ~]# rbd map --image rbd1 --name client.rbd
/dev/rbd0
[root@cephclient ~]#

Check the mapping:

[root@cephclient dev]# ll /dev/rbd*
brw-rw---- 1 root disk 252, 0 Dec  1 11:44 /dev/rbd0

/dev/rbd:
total 0
drwxr-xr-x 2 root root 60 Dec  1 11:44 rbd
[root@cephclient dev]# rbd showmapped --name client.rbd
id pool namespace image snap device
0  rbd            rbd1  -    /dev/rbd0
[root@cephclient dev]#


[root@cephclient dev]# fdisk -l  /dev/rbd0
Disk /dev/rbd0: 2 GiB, 2147483648 bytes, 4194304 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
[root@cephclient dev]#

3.1.3 Create a filesystem and mount it

3.1.3.1 Create a filesystem

[root@cephclient dev]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=65536 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1
data     =                       bsize=4096   blocks=524288, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=3072, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@cephclient dev]#

3.1.3.2 Mount

[root@cephclient dev]# mkdir /mnt/ceph-disk1
[root@cephclient dev]# mount /dev/rbd0 /mnt/ceph-disk1

Check that the mount succeeded:
[root@cephclient dev]# df -h /mnt/ceph-disk1
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       2.0G   47M  2.0G   3% /mnt/ceph-disk1
[root@cephclient dev]#

Write some data as a test:

[root@cephclient dev]# dd if=/dev/zero of=/mnt/ceph-disk1/file1 count=100 bs=1M
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.0430997 s, 2.4 GB/s
[root@cephclient dev]#

Verify the data was written:

[root@cephclient dev]# ll -h  /mnt/ceph-disk1/file1
-rw-r--r-- 1 root root 100M Dec  1 11:54 /mnt/ceph-disk1/file1
[root@cephclient dev]#

3.1.3.3 Configure an automatic mount service

Write a script named rbd-mount and save it as /usr/local/bin/rbd-mount:

[root@cephclient dev]# vim /usr/local/bin/rbd-mount
#!/bin/bash

# Pool name where block device image is stored
export poolname=rbd

# Disk image name
export rbdimage=rbd1

# Mounted Directory
export mountpoint=/mnt/ceph-disk1

# Image mount/unmount and pool are passed from the systemd service as arguments
# Are we mounting or unmounting?
if [ "$1" == "m" ]; then
   modprobe rbd
   rbd feature disable $rbdimage object-map fast-diff deep-flatten
   rbd map $rbdimage --id rbd --keyring /etc/ceph/ceph.client.rbd.keyring
   mkdir -p $mountpoint
   mount /dev/rbd/$poolname/$rbdimage $mountpoint
fi
if [ "$1" == "u" ]; then
   umount $mountpoint
   rbd unmap /dev/rbd/$poolname/$rbdimage
fi


Add execute permission:

[root@cephclient dev]# chmod u+x /usr/local/bin/rbd-mount

Configure the service: add rbd-mount.service under /etc/systemd/system:

[root@cephclient dev]# cat /etc/systemd/system/rbd-mount.service
[Unit]
Description=RADOS block device mapping for $rbdimage in pool $poolname
Conflicts=shutdown.target
Wants=network-online.target
After=NetworkManager-wait-online.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/local/bin/rbd-mount m
ExecStop=/usr/local/bin/rbd-mount u
[Install]
WantedBy=multi-user.target
[root@cephclient dev]#

Enable it to start at boot:

[root@cephclient dev]# systemctl daemon-reload
[root@cephclient dev]# systemctl enable rbd-mount.service
Created symlink /etc/systemd/system/multi-user.target.wants/rbd-mount.service → /etc/systemd/system/rbd-mount.service.
[root@cephclient dev]# reboot -f

After the reboot, check that the filesystem was mounted automatically:

[root@cephclient ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        1.9G     0  1.9G   0% /dev
tmpfs           1.9G     0  1.9G   0% /dev/shm
tmpfs           1.9G  8.5M  1.9G   1% /run
tmpfs           1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/vda2        19G  3.1G   16G  16% /
/dev/vda1      1014M  164M  851M  17% /boot
tmpfs           379M     0  379M   0% /run/user/0
/dev/rbd0       2.0G  147M  1.9G   8% /mnt/ceph-disk1
[root@cephclient ~]#