
Manually Installing a Ceph Cluster

Do the preparatory work below as the administrative user first; everything after that is installed as the ceph user.

sudo su -

Set the hostname (run the matching command on each node)

hostnamectl set-hostname storage-ceph01
hostnamectl set-hostname storage-ceph02
hostnamectl set-hostname storage-ceph03

Map the hostnames in /etc/hosts

cat << EOF | sudo tee -a  /etc/hosts >> /dev/null
172.20.0.15 storage-ceph01
172.20.0.7 storage-ceph02
172.20.0.3 storage-ceph03
EOF
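
A quick check that the mappings work (a minimal sanity test, using the hostnames configured above):

# every peer should resolve and answer one ping
for h in storage-ceph01 storage-ceph02 storage-ceph03; do ping -c 1 $h; done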

Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

Disable SELinux

setenforce 0
sed -ri 's#(SELINUX=).*#\1disabled#g' /etc/selinux/config

Install and configure NTP time synchronization

yum install -y ntp
vi /etc/ntp.conf
Comment out the existing `server xxxx iburst` lines and add `server ntp1.aliyun.com iburst` below them.
systemctl enable ntpd
systemctl start ntpd
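
Once ntpd is running, synchronization can be verified with ntpq; the row marked with `*` is the currently selected time source:

# check peer status against ntp1.aliyun.com
ntpq -p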

Create the ceph user

useradd -d /home/ceph -m ceph
echo 123456 | passwd --stdin ceph

Grant the ceph user passwordless sudo

echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
sudo chmod 0440 /etc/sudoers.d/ceph
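
The entry can be verified without switching users:

# list the sudo privileges granted to the ceph user
sudo -l -U ceph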

Add the Ceph yum repositories

cat << EOM > /etc/yum.repos.d/ceph.repo
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-15.2.12/el7/noarch/
enabled=1
gpgcheck=0
 
[ceph-x86_64]
name=Ceph x86_64 packages
baseurl=https://mirrors.tuna.tsinghua.edu.cn/ceph/rpm-15.2.12/el7/x86_64/
enabled=1
gpgcheck=0
EOM
 
yum makecache
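
A quick check that both repositories were picked up:

# ceph-noarch and ceph-x86_64 should both appear
yum repolist | grep -i ceph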

All of the following operations are performed as the ceph user

su - ceph

Install the Ceph packages and their dependencies

sudo yum install -y snappy leveldb gdisk python-argparse gperftools-libs
sudo yum install -y ceph

Install mon

Generate the cluster UUID (fsid)

# uuidgen       (generate this on one host only)
4d8fec26-e363-4753-b60f-49d69ab44cab

export cephuid=4d8fec26-e363-4753-b60f-49d69ab44cab   # run on all three hosts, with the same UUID everywhere

Create the global Ceph configuration file

cat <<EOF | sudo tee -a /etc/ceph/ceph.conf >> /dev/null
[global]
fsid = $cephuid
mon initial members = storage-ceph01, storage-ceph02, storage-ceph03
mon host = 172.20.0.15, 172.20.0.7, 172.20.0.3
public network = 192.168.31.0/24
cluster network = 172.20.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
EOF

Adjust the values of mon initial members, mon host, public network, and cluster network for your environment (the mon host addresses must be on the public network). If you only have a single internal network, simply drop the public network parameter.
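
If you are unsure which subnets to enter, the addresses on each node show the candidates (output varies per environment):

# list IPv4 addresses to identify the public and cluster subnets
ip -4 addr show | grep inet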

Generate the monitor keyring

#storage-ceph01
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

Generate the client.admin keyring

#storage-ceph01
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
sudo chown ceph:ceph /etc/ceph/ceph.client.admin.keyring

Generate the bootstrap-osd keyring used for cluster initialization

#storage-ceph01
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'
 
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
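
After the imports, the combined keyring should hold all three keys; listing it is a quick sanity check:

# expect entries for mon., client.admin and client.bootstrap-osd
ceph-authtool -l /tmp/ceph.mon.keyring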

Generate the initial monmap

#storage-ceph01
monmaptool --create --add storage-ceph01 172.20.0.15 --add storage-ceph02 172.20.0.7 --add storage-ceph03 172.20.0.3 --fsid $cephuid /tmp/monmap

Remember to adjust the IP addresses and hostnames for your environment.
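
The map can be inspected before it is distributed:

# print the monmap; verify the fsid and the three mon entries
monmaptool --print /tmp/monmap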

Distribute the monmap

#storage-ceph01
scp /tmp/monmap root@storage-ceph02:/tmp/
scp /tmp/monmap root@storage-ceph03:/tmp/

Distribute the client.admin keyring

#storage-ceph01
scp /etc/ceph/ceph.client.admin.keyring root@storage-ceph02:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring root@storage-ceph03:/etc/ceph/

Distribute the monitor keyring

#storage-ceph01
scp /tmp/ceph.mon.keyring root@storage-ceph02:/tmp/
scp /tmp/ceph.mon.keyring root@storage-ceph03:/tmp/

Fix the ownership of the copied files

#storage-ceph02 && storage-ceph03
sudo chown ceph:ceph /tmp/ceph.mon.keyring
sudo chown ceph:ceph /etc/ceph/ceph.client.admin.keyring

Create the mon data directories

#storage-ceph01
mkdir /var/lib/ceph/mon/ceph-storage-ceph01
 
#storage-ceph02
mkdir /var/lib/ceph/mon/ceph-storage-ceph02
 
#storage-ceph03
mkdir /var/lib/ceph/mon/ceph-storage-ceph03

Initialize the monitor on each node

#storage-ceph01
ceph-mon --mkfs -i storage-ceph01 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
 
#storage-ceph02
ceph-mon --mkfs -i storage-ceph02 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
 
#storage-ceph03
ceph-mon --mkfs -i storage-ceph03 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
 
#on every node, check the generated files
[ceph@storage-ceph01 ~]$ ls /var/lib/ceph/mon/ceph-storage-ceph01/
keyring  kv_backend  store.db

Start the mon service (run each pair of commands on its own node)

sudo systemctl restart ceph-mon@storage-ceph01
sudo systemctl enable ceph-mon@storage-ceph01
sudo systemctl restart ceph-mon@storage-ceph02
sudo systemctl enable ceph-mon@storage-ceph02
sudo systemctl restart ceph-mon@storage-ceph03
sudo systemctl enable ceph-mon@storage-ceph03
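
With all three mons started, the cluster already answers status queries; the health warnings handled below are expected at this point:

# all three monitors should appear in the quorum
ceph -s
ceph mon stat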

Issue 1: `3 monitors have not enabled msgr2`. To fix it (the command changes the cluster monmap, so running it once from any mon node is enough):

ceph mon enable-msgr2

Issue 2: `mons are allowing insecure global_id reclaim`. To fix it (run on one mon node):

If AUTH_INSECURE_GLOBAL_ID_RECLAIM has not raised a health alert and the auth_expose_insecure_global_id_reclaim setting has not been disabled (it is enabled by default), then no clients that still need upgrading are currently connected, and it is safe to disallow insecure global_id reclaim:
ceph config set mon auth_allow_insecure_global_id_reclaim false
# If there are still clients that need to be upgraded, the alert can be temporarily muted with:
ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED 1w   # 1 week
# Not recommended, but the warning can also be disabled indefinitely with:
ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false

Install mgr

Generate the mgr keyrings

#storage-ceph01
mkdir /var/lib/ceph/mgr/ceph-storage-ceph01
cat <<EOF | tee /var/lib/ceph/mgr/ceph-storage-ceph01/keyring >> /dev/null
$(ceph auth get-or-create mgr.storage-ceph01 mon 'allow profile mgr' osd 'allow *' mds 'allow *')
EOF
 
#storage-ceph02
mkdir /var/lib/ceph/mgr/ceph-storage-ceph02
cat <<EOF | tee /var/lib/ceph/mgr/ceph-storage-ceph02/keyring >> /dev/null
$(ceph auth get-or-create mgr.storage-ceph02 mon 'allow profile mgr' osd 'allow *' mds 'allow *')
EOF
 
 
#storage-ceph03
mkdir /var/lib/ceph/mgr/ceph-storage-ceph03
cat <<EOF | tee /var/lib/ceph/mgr/ceph-storage-ceph03/keyring >> /dev/null
$(ceph auth get-or-create mgr.storage-ceph03 mon 'allow profile mgr' osd 'allow *' mds 'allow *')
EOF

Start the mgr service

#storage-ceph01
sudo systemctl restart ceph-mgr@storage-ceph01
sudo systemctl enable ceph-mgr@storage-ceph01
 
#storage-ceph02
sudo systemctl restart ceph-mgr@storage-ceph02
sudo systemctl enable ceph-mgr@storage-ceph02
 
#storage-ceph03
sudo systemctl restart ceph-mgr@storage-ceph03
sudo systemctl enable ceph-mgr@storage-ceph03
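
One mgr should now be active, with the other two on standby:

# expect something like: mgr: storage-ceph01(active), standbys: storage-ceph02, storage-ceph03
ceph -s | grep mgr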

Issue 3: `HEALTH_WARN Module 'restful' has failed dependency: No module named 'pecan'`. To fix it (run on all mgr nodes):

sudo su -
pip3 install pecan werkzeug
su - ceph
pip3 install --user ceph pecan werkzeug

Running `sudo su -` and then `pip3 install pecan werkzeug` alone should be enough. Reboot the system, wait a while, and check `ceph -s` again. If it still has not recovered, also run `pip3 install --user ceph pecan werkzeug` as the ceph user and reboot once more.

Install osd

Distribute the bootstrap-osd keyring

#storage-ceph01
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@storage-ceph02:/var/lib/ceph/bootstrap-osd/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@storage-ceph03:/var/lib/ceph/bootstrap-osd/
 
#all three hosts
sudo chown ceph:ceph /var/lib/ceph/bootstrap-osd/ceph.keyring

Create the LVM-backed OSDs

# clean the disk first (only needed if it held a previous OSD; the
# device-mapper name below comes from `dmsetup ls` and will differ per system)
sudo dmsetup remove ceph--8ac0d9e1--ace9--4260--bc3d--9984442293f2-osd--block--05fa6b88--5b2b--4f06--8f7f--85218373da0e
sudo wipefs -af /dev/vdb

# run on each OSD node
sudo ceph-volume lvm create --data /dev/vdb
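
ceph-volume prepares the LV, registers the OSD, and starts its service; the result can be checked from any node:

# show which LV backs each OSD, then the CRUSH tree with all OSDs up
sudo ceph-volume lvm list
ceph osd tree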

Start the osd services (each on its own node)

#storage-ceph01
sudo systemctl restart ceph-osd@0
sudo systemctl enable ceph-osd@0
 
#storage-ceph02
sudo systemctl restart ceph-osd@1
sudo systemctl enable ceph-osd@1
 
#storage-ceph03
sudo systemctl restart ceph-osd@2
sudo systemctl enable ceph-osd@2

Install mds

Create the mds directories

#storage-ceph01
mkdir -p /var/lib/ceph/mds/ceph-storage-ceph01
 
#storage-ceph02
mkdir -p /var/lib/ceph/mds/ceph-storage-ceph02
 
#storage-ceph03
mkdir -p /var/lib/ceph/mds/ceph-storage-ceph03

Generate the mds keyrings

#storage-ceph01
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-storage-ceph01/keyring --gen-key -n mds.storage-ceph01
 
#storage-ceph02
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-storage-ceph02/keyring --gen-key -n mds.storage-ceph02
 
#storage-ceph03
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-storage-ceph03/keyring --gen-key -n mds.storage-ceph03

Register the mds keyrings with the cluster

#storage-ceph01
ceph auth add mds.storage-ceph01 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-storage-ceph01/keyring
 
#storage-ceph02
ceph auth add mds.storage-ceph02 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-storage-ceph02/keyring
 
#storage-ceph03
ceph auth add mds.storage-ceph03 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-storage-ceph03/keyring

Append the mds sections to the configuration file (on every node, since each keeps its own copy of /etc/ceph/ceph.conf)

cat <<EOF | sudo tee -a /etc/ceph/ceph.conf >> /dev/null
 
[mds.storage-ceph01]
host = storage-ceph01
 
[mds.storage-ceph02]
host = storage-ceph02
 
[mds.storage-ceph03]
host = storage-ceph03
EOF

Start the mds service (the other daemons are restarted as well so they pick up the new configuration)

#storage-ceph01
sudo systemctl restart ceph-mon@storage-ceph01
sudo systemctl restart ceph-mgr@storage-ceph01
sudo systemctl restart ceph-mds@storage-ceph01
sudo systemctl enable ceph-mds@storage-ceph01
sudo systemctl restart ceph-osd@0
 
#storage-ceph02
sudo systemctl restart ceph-mon@storage-ceph02
sudo systemctl restart ceph-mgr@storage-ceph02
sudo systemctl restart ceph-mds@storage-ceph02
sudo systemctl enable ceph-mds@storage-ceph02
sudo systemctl restart ceph-osd@1
 
#storage-ceph03
sudo systemctl restart ceph-mon@storage-ceph03
sudo systemctl restart ceph-mgr@storage-ceph03
sudo systemctl restart ceph-mds@storage-ceph03
sudo systemctl enable ceph-mds@storage-ceph03
sudo systemctl restart ceph-osd@2
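
The mds daemons stay in standby until a filesystem exists. As a sketch (the pool names cephfs_data/cephfs_metadata and the pg counts are illustrative choices, not from the original), a CephFS can be created and the mds state checked like this:

# create the data and metadata pools (pg counts sized for a small test cluster)
ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 16
# create the filesystem; one mds becomes active, the rest standby
ceph fs new cephfs cephfs_metadata cephfs_data
ceph mds stat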