
04 Ceph Cluster Management (Repost)


Ceph Cluster Management

Ceph Resource Objects

Ceph Components

  • mon (monitor): manages the cluster
  • mgr (manager): monitoring and management
  • mds: CephFS metadata management
  • rgw: object storage gateway
  • osd: data storage
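
Each of these components runs as one or more Pods in the rook-ceph namespace. A quick way to see them is to list the Pods, optionally filtered by the app label that Rook sets on each component (a minimal sketch; output omitted, and the label value shown for the monitors is an assumption based on Rook's usual naming):

# list all Ceph component pods and the nodes they run on
kubectl -n rook-ceph get pods -o wide
# list only the pods of one component, e.g. the monitors
kubectl -n rook-ceph get pods -l app=rook-ceph-mon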

monitor, mgr, osd, and the CSI provisioners are deployed in the form of Deployments:

[root@m1 ceph]# kubectl -n rook-ceph get deployments.apps 
NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
csi-cephfsplugin-provisioner               2/2     2            2           7h21m
csi-rbdplugin-provisioner                  2/2     2            2           7h21m
rook-ceph-crashcollector-192.168.100.133   1/1     1            1           6h30m
rook-ceph-crashcollector-192.168.100.134   1/1     1            1           7h10m
rook-ceph-crashcollector-192.168.100.135   1/1     1            1           7h14m
rook-ceph-crashcollector-192.168.100.136   1/1     1            1           7h21m
rook-ceph-crashcollector-192.168.100.137   1/1     1            1           7h12m
rook-ceph-mgr-a                            1/1     1            1           7h11m
rook-ceph-mon-a                            1/1     1            1           7h21m
rook-ceph-mon-b                            1/1     1            1           7h14m
rook-ceph-mon-c                            1/1     1            1           7h12m
rook-ceph-operator                         1/1     1            1           8h
rook-ceph-osd-0                            1/1     1            1           7h10m
rook-ceph-osd-1                            1/1     1            1           7h10m
rook-ceph-osd-2                            1/1     1            1           7h10m
rook-ceph-osd-3                            1/1     1            1           7h10m
rook-ceph-osd-4                            1/1     1            1           6h30m

The CSI CephFS driver and the RBD driver are deployed as DaemonSets:

[root@m1 ceph]# kubectl -n rook-ceph get daemonsets.apps 
NAME               DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
csi-cephfsplugin   5         5         5       5            5           <none>          7h31m
csi-rbdplugin      5         5         5       5            5           <none>          7h31m

All external-facing services are exposed through Kubernetes Service objects, because Pod addresses can change frequently, for example when Pods are recreated, rescheduled to another node, or restarted:

[root@m1 ceph]# kubectl -n rook-ceph get svc
NAME                       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
csi-cephfsplugin-metrics   ClusterIP   10.68.182.104   <none>        8080/TCP,8081/TCP   7h32m
csi-rbdplugin-metrics      ClusterIP   10.68.37.66     <none>        8080/TCP,8081/TCP   7h32m
rook-ceph-mgr              ClusterIP   10.68.5.125     <none>        9283/TCP            7h22m
rook-ceph-mgr-dashboard    ClusterIP   10.68.136.162   <none>        8443/TCP            7h22m
rook-ceph-mon-a            ClusterIP   10.68.31.244    <none>        6789/TCP,3300/TCP   7h32m
rook-ceph-mon-b            ClusterIP   10.68.196.47    <none>        6789/TCP,3300/TCP   7h26m
rook-ceph-mon-c            ClusterIP   10.68.212.28    <none>        6789/TCP,3300/TCP   7h23m
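
Because every daemon sits behind a Service, in-cluster clients can always use the stable ClusterIP or DNS name instead of a Pod IP. For a quick look at the mgr dashboard from a workstation, a port-forward against the Service above also works (a sketch; the local port 8443 is an arbitrary choice):

# forward the dashboard Service locally, then browse to https://localhost:8443
kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8443:8443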

OSD initialization is handled by Job controllers:

[root@m1 ceph]# kubectl -n rook-ceph get jobs
NAME                                    COMPLETIONS   DURATION   AGE
rook-ceph-osd-prepare-192.168.100.133   1/1           8s         6h42m
rook-ceph-osd-prepare-192.168.100.134   1/1           2s         6h42m
rook-ceph-osd-prepare-192.168.100.135   1/1           3s         6h42m
rook-ceph-osd-prepare-192.168.100.136   1/1           3s         6h42m
rook-ceph-osd-prepare-192.168.100.137   1/1           2s         6h42m
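
If an OSD never comes up on a node, the logs of that node's prepare Job are the first place to look (a sketch; the Job name follows the node names shown above, output omitted):

# inspect how the disks on 192.168.100.133 were prepared as OSDs
kubectl -n rook-ceph logs job/rook-ceph-osd-prepare-192.168.100.133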

The toolbox client

Use the toolbox client to connect to the Ceph cluster, run ceph commands, and so on.

The toolbox resource manifest

# the rook deployment package ships with the toolbox YAML manifest
[root@m1 ceph]# cat toolbox.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:v1.5.5
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_CEPH_USERNAME
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-username
          - name: ROOK_CEPH_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: ceph-secret
        volumeMounts:
          - mountPath: /etc/ceph
            name: ceph-config
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
            - key: data
              path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5

Deploying the toolbox client

[root@m1 ceph]# kubectl apply -f toolbox.yaml
deployment.apps/rook-ceph-tools created
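
The generated Pod name (rook-ceph-tools-<hash>) changes on every rollout, so instead of copying it each time the shell can also be opened through the Deployment itself (a sketch; assumes a reasonably recent kubectl that accepts deploy/<name> for exec):

# wait until the toolbox pod is ready
kubectl -n rook-ceph rollout status deploy/rook-ceph-tools
# open a shell without looking up the pod name
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash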

Connecting to the Ceph cluster

[root@m1 ceph]# kubectl -n rook-ceph exec -it rook-ceph-tools-77bf5b9b7d-rxdjb -- bash
[root@rook-ceph-tools-77bf5b9b7d-rxdjb /]# ceph -s
  cluster:
    id:     d9084983-64b8-480f-ba73-38a718d6b076
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum a,c,b (age 8h)
    mgr: a(active, since 8h)
    osd: 5 osds: 5 up (since 7h), 5 in (since 7h)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   5.0 GiB used, 245 GiB / 250 GiB avail
    pgs:     1 active+clean

Common Ceph commands

  • ceph status
  • ceph osd status
  • ceph df
  • rados df

[root@rook-ceph-tools-77bf5b9b7d-rxdjb /]# ceph osd status
ID  HOST              USED  AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE      
 0  192.168.100.135  1027M  48.9G      0        0       0        0   exists,up  
 1  192.168.100.136  1027M  48.9G      0        0       0        0   exists,up  
 2  192.168.100.137  1027M  48.9G      0        0       0        0   exists,up  
 3  192.168.100.134  1027M  48.9G      0        0       0        0   exists,up  
 4  192.168.100.133  1027M  48.9G      0        0       0        0   exists,up  

[root@rook-ceph-tools-77bf5b9b7d-rxdjb /]# ceph osd tree  
ID   CLASS  WEIGHT   TYPE NAME                 STATUS  REWEIGHT  PRI-AFF
 -1         0.24399  root default                                       
-11         0.04880      host 192-168-100-133                           
  4    hdd  0.04880          osd.4                 up   1.00000  1.00000
 -9         0.04880      host 192-168-100-134                           
  3    hdd  0.04880          osd.3                 up   1.00000  1.00000
 -5         0.04880      host 192-168-100-135                           
  0    hdd  0.04880          osd.0                 up   1.00000  1.00000
 -3         0.04880      host 192-168-100-136                           
  1    hdd  0.04880          osd.1                 up   1.00000  1.00000
 -7         0.04880      host 192-168-100-137                           
  2    hdd  0.04880          osd.2                 up   1.00000  1.00000

[root@rook-ceph-tools-77bf5b9b7d-rxdjb /]# ceph df      
--- RAW STORAGE ---
CLASS  SIZE     AVAIL    USED    RAW USED  %RAW USED
hdd    250 GiB  245 GiB  18 MiB   5.0 GiB       2.01
TOTAL  250 GiB  245 GiB  18 MiB   5.0 GiB       2.01
 
--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS  USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0   0 B      0     77 GiB

[root@rook-ceph-tools-77bf5b9b7d-rxdjb /]# rados df
POOL_NAME              USED  OBJECTS  CLONES  COPIES  MISSING_ON_PRIMARY  UNFOUND  DEGRADED  RD_OPS   RD  WR_OPS   WR  USED COMPR  UNDER COMPR
device_health_metrics   0 B        0       0       0                   0        0         0       0  0 B       0  0 B         0 B          0 B

total_objects    0
total_used       5.0 GiB
total_avail      245 GiB
total_space      250 GiB
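
Two more commands that are often used alongside the ones above are ceph osd df, which shows per-OSD utilization and PG counts, and ceph health detail, which expands whatever is hidden behind the HEALTH_ summary (output omitted; run them from the same toolbox shell):

# per-OSD capacity, utilization and PG distribution
ceph osd df
# detailed explanation of any health warnings
ceph health detail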

Accessing Ceph from k8s

Kubernetes needs a configuration file and an authentication (keyring) file to access the Ceph cluster; both can be taken directly from the tools container.

Configure the Ceph yum repository

[root@m1 ceph]# cat /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el7/x86_64/
enabled=1
gpgcheck=0

Install ceph-common

[root@m1 ceph]# yum install -y ceph-common

Copy the ceph.conf and keyring files

[root@m1 ceph]# kubectl -n rook-ceph exec rook-ceph-tools-77bf5b9b7d-rxdjb -- cat /etc/ceph/ceph.conf | tee /etc/ceph/ceph.conf
[global]
mon_host = 10.68.196.47:6789,10.68.212.28:6789,10.68.31.244:6789

[client.admin]
keyring = /etc/ceph/keyring

[root@m1 ceph]# kubectl -n rook-ceph exec rook-ceph-tools-77bf5b9b7d-rxdjb -- cat /etc/ceph/keyring | tee /etc/ceph/keyring
[client.admin]
key = AQA5hn1jLkAIBBAA2cYo8uGuiwBCIXxtTBizGQ==

[root@m1 ~]# ls /etc/ceph/ -lh
total 12K
-rw-r--r-- 1 root root 123 Nov 23 19:29 ceph.conf
-rw-r--r-- 1 root root  64 Nov 23 19:30 keyring
-rw-r--r-- 1 root root  92 Aug 10 01:34 rbdmap
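
The keyring holds the client.admin key, so it is worth tightening its permissions after copying it (an extra hardening step, not part of the original transcript):

# keep the admin key readable by root only
chmod 600 /etc/ceph/keyring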

Run ceph commands locally to check the cluster status

[root@m1 ceph]# ceph -s
  cluster:
    id:     d9084983-64b8-480f-ba73-38a718d6b076
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum a,c,b (age 8h)
    mgr: a(active, since 8h)
    osd: 5 osds: 5 up (since 8h), 5 in (since 8h)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   5.0 GiB used, 245 GiB / 250 GiB avail
    pgs:     1 active+clean

Accessing RBD block storage

Create a pool

[root@m1 ceph]# ceph osd pool create rook 16 16
pool 'rook' created
[root@m1 ceph]# ceph osd lspools
1 device_health_metrics
2 rook
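
On recent Ceph releases a pool without an application tag triggers a POOL_APP_NOT_ENABLED health warning, so it is worth marking the new pool for RBD use (an extra step, not shown in the original transcript):

# tag the pool so Ceph knows it will hold RBD images
ceph osd pool application enable rook rbd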

Create an RBD block device in the pool

[root@m1 ceph]# rbd create -p rook --image rook-rbd.img --size 10G
[root@m1 ceph]# rbd -p rook ls
rook-rbd.img
[root@m1 ceph]# rbd info rook/rook-rbd.img
rbd image 'rook-rbd.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 86b5b4bdb2ca
        block_name_prefix: rbd_data.86b5b4bdb2ca
        format: 2
        features: layering
        op_features: 
        flags: 
        create_timestamp: Wed Nov 23 19:41:48 2022
        access_timestamp: Wed Nov 23 19:41:48 2022
        modify_timestamp: Wed Nov 23 19:41:48 2022

Map and mount the RBD on a client

[root@m1 ceph]# rbd map rook/rook-rbd.img
/dev/rbd0

[root@m1 ceph]# rbd device ls
id  pool  namespace  image         snap  device   
0   rook             rook-rbd.img  -     /dev/rbd0
[root@m1 ceph]# rbd showmapped
id  pool  namespace  image         snap  device   
0   rook             rook-rbd.img  -     /dev/rbd0

[root@m1 ceph]# mkfs.xfs /dev/rbd0
Discarding blocks...Done.
meta-data=/dev/rbd0              isize=512    agcount=16, agsize=163840 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

[root@m1 ceph]# mount /dev/rbd0 /mnt/
[root@m1 ceph]# ls /mnt

[root@m1 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 1.4G     0  1.4G   0% /dev
tmpfs                    1.4G     0  1.4G   0% /dev/shm
tmpfs                    1.4G   29M  1.4G   3% /run
tmpfs                    1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/mapper/centos-root   37G   13G   25G  35% /
/dev/sda1               1014M  151M  864M  15% /boot
......
/dev/rbd0                 10G   33M   10G   1% /mnt
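
When the device is no longer needed, clean up in the reverse order: unmount the filesystem first, then unmap the RBD device (a sketch of the cleanup, not part of the original transcript):

# detach the filesystem and release the kernel RBD mapping
umount /mnt
rbd unmap /dev/rbd0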