
NFS High Availability (NFS + keepalived + Sersync)

Author: 張首富

Background

NFS is an old shared-storage technology that is still widely used by small companies and teams on tight budgets. My company needed an offline deployment option for customers who want their data kept safe but don't want to spend much money, so I chose NFS as the backend data store.

There are currently two main ways to synchronize NFS data:

  • Sersync-based master-slave synchronization
  • DRBD-based master-slave synchronization

Both approaches only synchronize data between master and slave; neither makes the NFS service itself highly available. Most write-ups online handle that part with heartbeat, but here I want to take a different route and use keepalived.

Network Topology

(The topology diagram from the original post is not reproduced here; the server table below describes the layout.)

Pre-installation Preparation

Server information (the original table listed the VIP as 192.168.1.110, but the keepalived configuration and all mount tests below use 192.168.1.101, so that address is used here):

IP              Role/HOSTNAME
192.168.1.101   VIP (keepalived virtual IP)
192.168.1.112   NFS-Master
192.168.1.111   NFS-Slave
192.168.1.120   NFS-Client

OS and kernel version:

# cat /etc/redhat-release
CentOS Linux release 7.5.1804 (Core)
# uname  -r
3.10.0-862.el7.x86_64

Shared directory:

Create a /test_nfs directory on both the NFS master and the slave to serve as the shared directory.
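A minimal sketch (the original doesn't show the command; the directory name comes from the table above):

# Run on both NFS-Master and NFS-Slave
mkdir -p /test_nfs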

Initial Environment Setup

Run the following basic server optimizations on all three machines:

# Switch the Yum repos to the Aliyun mirrors
yum install wget telnet -y
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Add the Aliyun EPEL repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# rpm -ivh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm

# Rebuild the yum cache
yum clean all
yum makecache
# Sync the system time
yum -y install ntp
/usr/sbin/ntpdate cn.pool.ntp.org
echo "* 4 * * * /usr/sbin/ntpdate cn.pool.ntp.org > /dev/null 2>&1" >> /var/spool/cron/root
systemctl  restart crond.service

# Install vim
yum -y install vim

# Raise the maximum number of open file descriptors
echo "ulimit -SHn 102400" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
*           soft   nofile       655350
*           hard   nofile       655350
EOF

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# Disable the firewall
systemctl disable firewalld.service 
systemctl stop firewalld.service 

#set ssh
sed -i 's/^GSSAPIAuthentication yes$/GSSAPIAuthentication no/' /etc/ssh/sshd_config
sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config
systemctl  restart sshd.service

# Kernel parameter tuning
cat >> /etc/sysctl.conf << EOF
vm.overcommit_memory = 1
net.ipv4.ip_local_port_range = 1024 65536
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_abort_on_overflow = 0
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 262144
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 262144
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.ipv4.netfilter.ip_conntrack_max = 2097152
net.nf_conntrack_max = 655360
net.netfilter.nf_conntrack_tcp_timeout_established = 1200
EOF
/sbin/sysctl -p

Install NFS

Run this on all three machines (the client needs nfs-utils in order to mount):

yum -y install nfs-utils rpcbind

Configure the NFS shared directory

NFS-Master:

[root@NFS-Master ~]# echo '/test_nfs 192.168.1.0/24(rw,sync,all_squash)' >> /etc/exports
[root@NFS-Master ~]# systemctl start rpcbind && systemctl start nfs
[root@NFS-Master ~]# systemctl enable rpcbind && systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.

NFS-Slave:

[root@NFS-Slave ~]# echo '/test_nfs 192.168.1.0/24(rw,sync,all_squash)' >> /etc/exports
[root@NFS-Slave ~]# systemctl start rpcbind && systemctl start nfs
[root@NFS-Slave ~]# systemctl enable rpcbind && systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
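In the export line, rw grants read-write access to the 192.168.1.0/24 network, sync forces writes to disk before the server replies, and all_squash maps every client user to nfsnobody (which is why the shared directory is chowned to nfsnobody below). As a quick sanity check on either server you can list the active exports; these are standard nfs-utils commands, not part of the original walkthrough:

# Show what this server is currently exporting
exportfs -v
showmount -e localhost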

Test that the mounts work:

# Test the NFS-Master side
[root@NFS-Client /]# mount -t nfs 192.168.1.112:/test_nfs /mnt
[root@NFS-Client /]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
192.168.1.112:/test_nfs nfs4      921G   69G  852G    8% /mnt
[root@NFS-Client /]# umount /mnt

# Test the NFS-Slave side
[root@NFS-Client /]# mount -t nfs 192.168.1.111:/test_nfs /mnt
[root@NFS-Client /]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
192.168.1.111:/test_nfs nfs4      931G   53G  878G    6% /mnt
[root@NFS-Client /]# umount /mnt

Install and Configure rsync + Sersync

Install rsync on NFS-Slave. Data written on NFS-Master needs to be backed up to NFS-Slave, so the rsync daemon runs on the slave. If you're not familiar with rsync, see an introduction to the rsync daemon; it isn't covered in detail here.

[root@NFS-Slave ~]# yum -y install rsync.x86_64
[root@NFS-Slave ~]# cat /etc/rsyncd.conf
uid = nfsnobody
gid = nfsnobody
port = 873
pid file = /var/rsyncd.pid
log file = /var/log/rsyncd.log
use chroot = no
max connections = 200
read only = false
list = false
fake super = yes
ignore errors
[test_nfs]
path = /test_nfs
auth users = test_nfs
secrets file = /etc/rsync.pass
hosts allow = 192.168.1.0/24
[root@NFS-Slave ~]# systemctl start rsyncd && systemctl enable rsyncd
[root@NFS-Slave ~]# echo 'test_nfs:zsf123' > /etc/rsync.pass
[root@NFS-Slave ~]# chmod 600 /etc/rsync.pass
[root@NFS-Slave ~]# chown nfsnobody:nfsnobody /test_nfs/

Test from NFS-Master

[root@NFS-Master ~]# yum -y install rsync.x86_64
[root@NFS-Master ~]# chown nfsnobody:nfsnobody /test_nfs/
[root@NFS-Master ~]# echo "zsf123" > /etc/rsync.pass
[root@NFS-Master ~]# chmod 600 /etc/rsync.pass
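# Note (clarification not in the original): on the rsync client, /etc/rsync.pass
# holds only the password; the daemon's secrets file on NFS-Slave uses user:password.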
# Create a test file and test the push
[root@NFS-Master ~]# cd /test_nfs/
[root@NFS-Master test_nfs]# echo "This is test file" > file.txt
[root@NFS-Master test_nfs]# rsync -arv /test_nfs/ test_nfs@192.168.1.111::test_nfs --password-file=/etc/rsync.pass
sending incremental file list
./
file.txt

sent 155 bytes  received 38 bytes  386.00 bytes/sec
total size is 18  speedup is 0.09

# Check the file on NFS-Slave
[root@NFS-Slave ~]# ls /test_nfs/
file.txt
[root@NFS-Slave ~]# cat /test_nfs/file.txt
This is test file

Install Sersync on NFS-Master. Sersync must run on NFS-Master, because only there can it watch /test_nfs for new writes and trigger the push to the slave.

[root@NFS-Master test_nfs]# cd /usr/local/
[root@NFS-Master local]# yum -y install wget.x86_64
# Download the Sersync package
[root@NFS-Master local]# wget https://raw.githubusercontent.com/wsgzao/sersync/master/sersync2.5.4_64bit_binary_stable_final.tar.gz
[root@NFS-Master local]# tar xvf sersync2.5.4_64bit_binary_stable_final.tar.gz
GNU-Linux-x86/
GNU-Linux-x86/sersync2
GNU-Linux-x86/confxml.xml
[root@NFS-Master local]# mv GNU-Linux-x86/ sersync
[root@NFS-Master local]# cd sersync/
# Edit the sersync configuration file
[root@NFS-Master sersync]# sed -ri 's#<delete start="true"/>#<delete start="false"/>#g' confxml.xml
[root@NFS-Master sersync]# sed -ri '24s#<localpath watch="/opt/tongbu">#<localpath watch="/test_nfs">#g' confxml.xml
[root@NFS-Master sersync]# sed -ri '25s#<remote ip="127.0.0.1" name="tongbu1"/>#<remote ip="192.168.1.111" name="test_nfs"/>#g' confxml.xml
[root@NFS-Master sersync]# sed -ri '30s#<commonParams params="-artuz"/>#<commonParams params="-az"/>#g' confxml.xml
[root@NFS-Master sersync]# sed -ri '31s#<auth start="false" users="root" passwordfile="/etc/rsync.pas"/>#<auth start="true" users="test_nfs" passwordfile="/etc/rsync.pass"/>#g' confxml.xml
[root@NFS-Master sersync]# sed -ri '33s#<timeout start="false" time="100"/><!-- timeout=100 -->#<timeout start="true" time="100"/><!-- timeout=100 -->#g' confxml.xml
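After these edits, the relevant parts of confxml.xml should look roughly like this (a sketch based on the stock sersync 2.5.4 template, trimmed to the elements touched above):

<localpath watch="/test_nfs">
    <remote ip="192.168.1.111" name="test_nfs"/>
</localpath>
<rsync>
    <commonParams params="-az"/>
    <auth start="true" users="test_nfs" passwordfile="/etc/rsync.pass"/>
    <userDefinedPort start="false" port="874"/><!-- port=874 -->
    <timeout start="true" time="100"/><!-- timeout=100 -->
    <ssh start="false"/>
</rsync>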

# Start Sersync (-d: run as a daemon, -r: do a full initial sync first, -o: config file path)
[root@NFS-Master sersync]# /usr/local/sersync/sersync2 -dro /usr/local/sersync/confxml.xml
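Note that sersync2 is started by hand here and will not survive a reboot. A minimal way to persist it (my addition, reusing the rc.local approach from the setup section) is:

# Start sersync2 at boot via rc.local
echo '/usr/local/sersync/sersync2 -dro /usr/local/sersync/confxml.xml' >> /etc/rc.local
chmod +x /etc/rc.d/rc.local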

Test:

[root@NFS-Master test_nfs]# echo "This is two test file" > two.file.txt
[root@NFS-Slave test_nfs]# ls
file.txt  two.file.txt
[root@NFS-Slave test_nfs]# cat two.file.txt
This is two test file

The result above shows that Sersync real-time synchronization is working.

Install and Configure keepalived

NFS-Master

[root@NFS-Master test_nfs]# yum -y install keepalived.x86_64
[root@NFS-Master test_nfs]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id NFS-Master
}

vrrp_instance VI_1 {
    state MASTER
    interface enp0s31f6
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass zhangshoufu
    }
    virtual_ipaddress {
        192.168.1.101
    }
}
[root@NFS-Master test_nfs]# systemctl start  keepalived.service && systemctl enable keepalived.service

NFS-Slave

[root@NFS-Slave test_nfs]# yum -y install keepalived.x86_64
[root@NFS-Slave test_nfs]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id NFS-Slave
}

vrrp_instance VI_1 {
    state MASTER
    interface enp0s31f6
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass zhangshoufu
    }
    virtual_ipaddress {
        192.168.1.101
    }
}
[root@NFS-Slave test_nfs]# systemctl start  keepalived.service && systemctl enable keepalived.service

Check that the virtual IP is present:

[root@NFS-Master test_nfs]# ip a | grep  192.168.1.101
    inet 192.168.1.101/32 scope global enp0s31f6

The output above shows that keepalived started successfully. Annotated configuration:

// Global definitions
global_defs {
   router_id NFS-Master
   // identifier of the machine running keepalived
}

vrrp_instance VI_1 {
    // master vs. backup is actually decided by priority; this value has no effect here
    state MASTER
    // network interface the instance binds to
    interface enp0s31f6
    // group ID; instances with the same value belong to one group
    virtual_router_id 51
    // priority; the node with the higher priority becomes master
    priority 150
    // advertisement interval in seconds
    advert_int 1
    // password authentication
    authentication {
        auth_type PASS
        auth_pass zhangshoufu
    }
    // the VIP
    virtual_ipaddress {
        192.168.1.101
    }
}

Mount test from NFS-Client via the VIP

[root@NFS-Client ~]# mount -t nfs 192.168.1.101:/test_nfs /mnt
[root@NFS-Client ~]# ls /mnt/
file.txt  two.file.txt
[root@NFS-Client ~]# umount /mnt/
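If the client should mount the share automatically at boot (not covered in the original), an /etc/fstab entry along these lines works; _netdev defers the mount until the network is up:

192.168.1.101:/test_nfs  /mnt  nfs  defaults,_netdev  0 0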

Simulate a machine going down and check whether the virtual IP fails over

[root@NFS-Master scripts]# ip a | grep 101
    inet 192.168.1.101/32 scope global enp0s31f6
[root@NFS-Slave ~]# ip a | grep 101
[root@NFS-Slave ~]#

# Stop the keepalived service on NFS-Master
[root@NFS-Master scripts]# systemctl stop keepalived.service
[root@NFS-Master scripts]# ip a | grep 101
[root@NFS-Master scripts]#

[root@NFS-Slave ~]# ip a | grep 101
    inet 192.168.1.101/32 scope global enp0s31f6

The VIP failed over successfully.

keepalived check script

keepalived moves the VIP based on whether the keepalived service itself is alive. If the whole machine goes down, that is enough; but if a network problem makes the NFS service unreachable while keepalived keeps running, the VIP will never move. The script below stops keepalived after repeated ping failures to 192.168.1.1 so that the VIP fails over:

[root@NFS-Master scripts]# pwd
/usr/local/scripts
[root@NFS-Master scripts]# cat check_keepalive.sh
#!/bin/bash
export PATH=$PATH
# Ping 192.168.1.1 once every 2 seconds; after 3 consecutive failures,
# stop keepalived so the VIP fails over to NFS-Slave.
sum=0
for i in `seq 29`;do
    if ping -c 1 192.168.1.1 &> /dev/null ;then
        sum=0
    else
        sum=$((sum+1))
        if [ $sum -eq 3 ];then
            systemctl stop keepalived.service
        fi
    fi
    sleep 2
done

Add it to cron. The loop does 29 iterations with a 2-second sleep (just under a minute), so a per-minute cron entry gives continuous monitoring:

[root@NFS-Master ~]# chmod +x /usr/local/scripts/check_keepalive.sh
[root@NFS-Master ~]# crontab -e
* * * * * /usr/local/scripts/check_keepalive.sh &> /dev/null
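For reference, keepalived can also run this kind of health check natively with vrrp_script instead of cron. A sketch of the equivalent keepalived.conf fragment (an alternative technique, not what is used above):

vrrp_script chk_net {
    script "ping -c 1 192.168.1.1"
    interval 2
    fall 3
}

vrrp_instance VI_1 {
    ...
    track_script {
        chk_net
    }
}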

Client check script

If the mount stops responding on the client, lazily unmount it and remount via the VIP:

#!/bin/bash
export PATH=$PATH
# If df fails or hangs (stale NFS mount), remount via the VIP.
# The timeout guards against df blocking forever on a dead mount.
for i in `seq 29`;do
    if ! timeout 5 df -Th &> /dev/null ;then
        umount -lf /mnt && mount -t nfs 192.168.1.101:/test_nfs /mnt
    fi
    sleep 2
done
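The original doesn't show how this client script is scheduled; presumably the same per-minute cron approach applies (the script path below is my assumption):

* * * * * /usr/local/scripts/check_nfs_client.sh &> /dev/null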

Shortcoming: a few seconds of data can still be lost during failover. If you really need strong consistency, don't try to save money here; use distributed storage instead.