mysql的相關操作
1、如果主節點已經運行了一段時間,且有大量資料時,如何配置並啟動slave節點
如果主節點已經運行了一段時間,有大量的資料,要先將主節點的完全備份恢復到從伺服器,再從備份時記錄的複製起始位置開始,讓從節點進行複製
主機10.0.0.8為主伺服器
主機10.0.0.18為從伺服器
(1)首先在主伺服器完全備份
[root@master ~]#mysqldump -A -F --single-transaction --master-data=1 >/backup/fullbackup_`date +%F_%T`.sql [root@master ~]#ll /backup/ total 2988 -rw-r--r-- 1 root root 3055918 Nov 27 17:41 fullbackup_2019-11-27_17:41:17.sql #將完全備份拷貝到從伺服器 [root@master ~]#scp /backup/fullbackup_2019-11-27_17\:41\:17.sql 10.0.0.18:/data/ #建議優化主和從節點伺服器的效能
(2)將完全備份還原到新的從節點
#在從節點安裝資料庫
[root@slave ~]#dnf -y install mariadb-server #編輯資料庫配置檔案啟動伺服器 [root@slave ~]#vim /etc/my.cnf.d/mariadb-server.cnf [mysqld] server-id=11 read-only [root@slave ~]#systemctl restart mariadb #配置從節點,從完全備份位置之後開始複製 [root@slave ~]#grep '^CHANGE MASTER' /data/fullbackup_2019-11-27_17\:41\:17.sql CHANGE MASTER TO MASTER_LOG_FILE='mariadb-bin.000003', MASTER_LOG_POS=389; [root@slave ~]#vim /data/fullbackup_2019-11-27_17\:41\:17.sql CHANGE MASTER TO MASTER_HOST='10.0.0.8', MASTER_USER='repluser', MASTER_PASSWORD='magedu', MASTER_PORT=3306, MASTER_LOG_FILE='mariadb-bin.000003', MASTER_LOG_POS=389; [root@slave ~]#mysql < /data/fullbackup_2019-11-27_17\:41\:17.sql [root@slave ~]#mysql Welcome to the MariaDB monitor. Commands end with ; or \g. Your MariaDB connection id is 9 Server version: 10.3.11-MariaDB MariaDB Server Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. MariaDB [(none)]> show slave status\G; *************************** 1. 
row *************************** Slave_IO_State: Master_Host: 10.0.0.8 Master_User: repluser Master_Port: 3306 Connect_Retry: 60 Master_Log_File: mariadb-bin.000003 Read_Master_Log_Pos: 389 Relay_Log_File: mariadb-relay-bin.000001 Relay_Log_Pos: 4 Relay_Master_Log_File: mariadb-bin.000003 Slave_IO_Running: No Slave_SQL_Running: No Replicate_Do_DB: Replicate_Ignore_DB: Replicate_Do_Table: Replicate_Ignore_Table: Replicate_Wild_Do_Table: Replicate_Wild_Ignore_Table: Last_Errno: 0 Last_Error: Skip_Counter: 0 Exec_Master_Log_Pos: 389 Relay_Log_Space: 256 Until_Condition: None Until_Log_File: Until_Log_Pos: 0 Master_SSL_Allowed: No Master_SSL_CA_File: Master_SSL_CA_Path: Master_SSL_Cert: Master_SSL_Cipher: Master_SSL_Key: Seconds_Behind_Master: NULL Master_SSL_Verify_Server_Cert: No Last_IO_Errno: 0 Last_IO_Error: Last_SQL_Errno: 0 Last_SQL_Error: Replicate_Ignore_Server_Ids: Master_Server_Id: 0 Master_SSL_Crl: Master_SSL_Crlpath: Using_Gtid: No Gtid_IO_Pos: Replicate_Do_Domain_Ids: Replicate_Ignore_Domain_Ids: Parallel_Mode: conservative SQL_Delay: 0 SQL_Remaining_Delay: NULL Slave_SQL_Running_State: Slave_DDL_Groups: 0 Slave_Non_Transactional_Groups: 0 Slave_Transactional_Groups: 0 1 row in set (0.000 sec) #開啟slave MariaDB [(none)]> start slave;2、當master伺服器宕機,提升一個slave成為新的master
#首先要找到那個從節點的資料庫是最新,讓它成為新的master
[root@centos8 ~]#cat /var/lib/mysql/relay-log.info 5 ./mariadb-relay-bin.000002 1180 mysql-bin.000002 996 0 #新master修改配置檔案,關閉只讀選項配置read-only [root@slave1 ~]#vim /etc/my.cnf.d/mariadb-server.cnf [mysqld] server-id=18 read-only=OFF log-bin=/data/mysql/logbin/mysql-bin #清除舊的master複製資訊 MariaDB [hellodb]>set global read_only=off; MariaDB [hellodb]>stop slave; MariaDB [hellodb]>reset slave all; #在新的master上完全備份 [root@slave1 ~]#mysqldump -A --single-transaction --master-data=1 -F >backup.sql [root@slave1 ~]#scp backup.sql 10.0.0.28: #在分析舊的master的二進位制日誌,將未同步到至新的master的二進位制日誌匯出來,恢復到新的master,儘可能恢復資料 #其他所有slave重新還原資料庫,指向新的master [root@slave2 ~]#vim backup.sql CHANGE MASTER TO MASTER_HOST='10.0.0.18', MASTER_USER='repluser', MASTER_PASSWORD='centos', MASTER_PORT=3306, MASTER_LOG_FILE='mysql-bin.000002', MASTER_LOG_POS=371; MariaDB [hellodb]>stop slave; MariaDB [hellodb]>reset slave all; MariaDB [hellodb]>set sql_log_bin=off; MariaDB [hellodb]>source backup.sql; MariaDB [hellodb]>set sql_log_bin=on; MariaDB [hellodb]>start slave;3、通過MHA0.58搭建一個數據庫叢集結構
#環境配置
10.0.0.7 centos7 MHA管理端
10.0.0.8 centos8 master
10.0.0.18 centos8 slave1
10.0.0.28 centos8 slave2
1.在管理節點安裝包檔案
[root@mha-manager ~]# yum -y install mha4mysql-*.rpm
2.在所有mysql伺服器上安裝node包
[root@master ~]# yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm [root@slave1 ~]# yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm [root@slave2 ~]# yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
3.在所有節點之間實現基於key驗證
[root@mha-manager ~]# ssh-keygen Generating public/private rsa key pair. Enter file in which to save the key (/root/.ssh/id_rsa): Enter passphrase (empty for no passphrase): Enter same passphrase again: Your identification has been saved in /root/.ssh/id_rsa. Your public key has been saved in /root/.ssh/id_rsa.pub. The key fingerprint is: SHA256:erWaaksYynrpnlcId+FN35Z6e/nkFfa/o5nyEImuxvg root@mha-manager The key's randomart image is: +---[RSA 2048]----+ | | | . . | | . + . . . | | . . o . o = | | o.o S..= o | | . ..o......o . o| | o...= ...o . .+| | .o..o.+.o .o =+o| |.++. .=Eo o*.o*| +----[SHA256]-----+ [root@mha-manager ~]# ssh-copy-id 10.0.0.7 /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub" The authenticity of host '10.0.0.7 (10.0.0.7)' can't be established. ECDSA key fingerprint is SHA256:1sCsDttAdJe3IIZtYIXEIYJKEN/doLn5IP/OxPQLFFM. ECDSA key fingerprint is MD5:67:c1:53:9c:33:f5:f9:9f:b5:5a:52:99:68:c1:d0:b2. Are you sure you want to continue connecting (yes/no)? yes /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys root@10.0.0.7's password: Number of key(s) added: 1 Now try logging into the machine, with: "ssh '10.0.0.7'" and check to make sure that only the key(s) you wanted were added. [root@mha-manager ~]# rsync -av .ssh 10.0.0.8:/root/ [root@mha-manager ~]# rsync -av .ssh 10.0.0.18:/root/ [root@mha-manager ~]# rsync -av .ssh 10.0.0.28:/root/
4.在管理節點建立配置檔案
[root@mha-manager ~]# mkdir /etc/mastermha/ [root@mha-manager ~]# vim /etc/mastermha/app1.cnf [server default] user=mhauser #用於遠端連線MySQL所有節點的使用者,需要有管理員的許可權 password=magedu manager_workdir=/data/mastermha/app1/ #目錄會自動生成,無需手動建立 manager_log=/data/mastermha/app1/manager.log remote_workdir=/data/mastermha/app1/ ssh_user=root #用於實現遠端ssh基於KEY的連線,訪問二進位制日誌 repl_user=repluser #主從複製的使用者資訊 repl_password=magedu ping_interval=1 #健康性檢查的時間間隔 master_ip_failover_script=/usr/local/bin/master_ip_failover #切換VIP的perl指令碼 report_script=/usr/local/bin/sendmail.sh #當執行報警指令碼 check_repl_delay=0 #預設如果slave中從庫落後主庫relaylog超過100M,主庫不會選擇這個從庫為新的master,因為這個從庫進行恢復需要很長的時間.通過這個引數,mha觸發主從切換的時候會忽略複製的延時,通過check_repl_delay=0這個引數,mha觸發主從切換時會忽略複製的延時,對於設定candidate_master=1的從庫非常有用,這樣確保這個從庫一定能成為最新的master master_binlog_dir=/data/mysql/ #指定二進位制日誌存放的目錄,mha4mysql-manager-0.58必須指定,之前版本不需要指定 [server1] hostname=10.0.0.8 candidate_master=1 [server2] hostname=10.0.0.18 [server3] hostname=10.0.0.28 candidate_master=1 #設定為優先候選master,即使不是叢集中事件最新的slave,也會優先當master
5.製作相關指令碼
#傳送郵件指令碼 [root@mha-manager ~]# cat /usr/local/bin/sendmail.sh #!/bin/bash # #******************************************************************** #Author: llliuhuiii #QQ: 1694993508 #Date: 2020-10-15 #FileName: /usr/local/bin/sendmail.sh #Description: The test script #Copyright (C): 2020 All rights reserved #******************************************************************** echo "MySQL is down" | mail -s "MHA Warning" 1694993508@qq.com [root@mha-manager ~]# chmod +x /usr/local/bin/sendmail.sh #實現浮動IP即vip指令碼 [root@mha-manager ~]# cat /usr/local/bin/master_ip_failover #!/usr/bin/env perl use strict; use warnings FATAL => 'all'; use Getopt::Long; my ( $command, $ssh_user, $orig_master_host, $orig_master_ip, $orig_master_port, $new_master_host, $new_master_ip, $new_master_port ); my $vip = '10.0.0.100/24'; my $gateway = '10.0.0.2'; my $interface = 'eth0'; my $key = "1"; my $ssh_start_vip = "/sbin/ifconfig $interface:$key $vip;/sbin/arping -I $interface -c 3 -s $vip $gateway >/dev/null 2>&1"; my $ssh_stop_vip = "/sbin/ifconfig $interface:$key down"; GetOptions( 'command=s' => \$command, 'ssh_user=s' => \$ssh_user, 'orig_master_host=s' => \$orig_master_host, 'orig_master_ip=s' => \$orig_master_ip, 'orig_master_port=i' => \$orig_master_port, 'new_master_host=s' => \$new_master_host, 'new_master_ip=s' => \$new_master_ip, 'new_master_port=i' => \$new_master_port, ); exit &main(); sub main { print "\n\nIN SCRIPT TEST====$ssh_stop_vip==$ssh_start_vip===\n\n"; if ( $command eq "stop" || $command eq "stopssh" ) { # $orig_master_host, $orig_master_ip, $orig_master_port are passed. # If you manage master ip address at global catalog database, # invalidate orig_master_ip here. my $exit_code = 1; eval { print "Disabling the VIP on old master: $orig_master_host \n"; &stop_vip(); $exit_code = 0; }; if ($@) { warn "Got Error: $@\n"; exit $exit_code; } exit $exit_code; } elsif ( $command eq "start" ) { # all arguments are passed. 
# If you manage master ip address at global catalog database, # activate new_master_ip here. # You can also grant write access (create user, set read_only=0, etc) here. my $exit_code = 10; eval { print "Enabling the VIP - $vip on the new master - $new_master_host \n"; &start_vip(); $exit_code = 0; }; if ($@) { warn $@; exit $exit_code; } exit $exit_code; } elsif ( $command eq "status" ) { print "Checking the Status of the script.. OK \n"; `ssh $ssh_user\@$orig_master_host \" $ssh_start_vip \"`; exit 0; } else { &usage(); exit 1; }} # A simple system call that enable the VIP on the new master sub start_vip() { `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`; } # A simple system call that disable the VIP on the old_master sub stop_vip() { `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`; } sub usage { print "Usage: master_ip_failover --command=start|stop|stopssh|status -- orig_master_host=host --orig_master_ip=ip --orig_master_port=port -- new_master_host=host --new_master_ip=ip --new_master_port=port\n"; } #指令碼新增許可權 [root@mha-manager ~]# chmod +x /usr/local/bin/master_ip_failover
6.配置master主伺服器
#在master主機即8主機上安裝mysql5.7 #通過指令碼實現安裝 [root@master ~]# ls anaconda-ks.cfg mha4mysql-node-0.58-0.el7.centos.noarch.rpm install_mysql5.7or8.0_for_centos.sh mysql-5.7.30-linux-glibc2.12-x86_64.tar.gz [root@master ~]# bash install_mysql5.7or8.0_for_centos.sh 開始安裝MySQL資料庫... 建立mysql使用者 [ OK ] Starting MySQL. SUCCESS! 資料庫安裝完成 [ OK ] [root@master ~]# vim /etc/my.cnf [mysqld] server-id=1 log-bin=/data/mysql/mysql-bin skip_name_resolve=1 general_log #非必須項 [root@master ~]# service mysqld restart Shutting down MySQL.. SUCCESS! Starting MySQL. SUCCESS! [root@master ~]# mysql -pmagedu mysql> show master status; +------------------+----------+--------------+------------------+-------------------+ | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | +------------------+----------+--------------+------------------+-------------------+ | mysql-bin.000001 | 154 | | | | +------------------+----------+--------------+------------------+-------------------+ 1 row in set (0.00 sec) mysql> grant replication slave on *.* to repluser@'10.0.0.%' identified by 'magedu'; Query OK, 0 rows affected, 1 warning (0.00 sec) mysql> grant all on *.* to mhauser@'10.0.0.%' identified by 'magedu'; #該授權使用者使用者名稱和密碼要和配置檔案指定的使用者及密碼相同 Query OK, 0 rows affected, 1 warning (0.00 sec) #配置vip [root@master ~]# ifconfig eth0:1 10.0.0.100/24
7.配置slave從伺服器
[root@slave1 ~]# vim /etc/my.cnf [mysqld] server-id=2 log-bin=/data/mysql/mysql-bin read-only relay_log_purge=0 #關閉清理中繼日誌,中繼日誌預設定期清理 skip_name_resolve=1 [root@slave1 ~]# service mysqld restart Shutting down MySQL.. SUCCESS! Starting MySQL. SUCCESS! [root@slave1 ~]# mysql -pmagedu mysql> CHANGE MASTER TO -> MASTER_HOST='10.0.0.8', -> MASTER_USER='repluser', -> MASTER_PASSWORD='magedu', -> MASTER_LOG_FILE='mysql-bin.000001', -> MASTER_LOG_POS=154; Query OK, 0 rows affected, 2 warnings (0.00 sec) mysql> start slave; Query OK, 0 rows affected (0.00 sec) mysql> show slave status\G *************************** 1. row *************************** Slave_IO_State: Waiting for master to send event Master_Host: 10.0.0.8 Master_User: repluser Master_Port: 3306 Connect_Retry: 60 Master_Log_File: mysql-bin.000001 Read_Master_Log_Pos: 738 Relay_Log_File: slave1-relay-bin.000002 Relay_Log_Pos: 904 Relay_Master_Log_File: mysql-bin.000001 Slave_IO_Running: Yes Slave_SQL_Running: Yes Replicate_Do_DB: Replicate_Ignore_DB: Replicate_Do_Table: Replicate_Ignore_Table: Replicate_Wild_Do_Table: Replicate_Wild_Ignore_Table: Last_Errno: 0 Last_Error: Skip_Counter: 0 Exec_Master_Log_Pos: 738 Relay_Log_Space: 1112 Until_Condition: None Until_Log_File: Until_Log_Pos: 0 Master_SSL_Allowed: No Master_SSL_CA_File: Master_SSL_CA_Path: Master_SSL_Cert: Master_SSL_Cipher: Master_SSL_Key: Seconds_Behind_Master: 0 Master_SSL_Verify_Server_Cert: No Last_IO_Errno: 0 Last_IO_Error: Last_SQL_Errno: 0 Last_SQL_Error: Replicate_Ignore_Server_Ids: Master_Server_Id: 1 Master_UUID: 7cf1fff9-0ede-11eb-a71e-000c2915e43d Master_Info_File: /data/mysql/master.info SQL_Delay: 0 SQL_Remaining_Delay: NULL Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates Master_Retry_Count: 86400 Master_Bind: Last_IO_Error_Timestamp: Last_SQL_Error_Timestamp: Master_SSL_Crl: Master_SSL_Crlpath: Retrieved_Gtid_Set: Executed_Gtid_Set: Auto_Position: 0 Replicate_Rewrite_DB: Channel_Name: Master_TLS_Version: 1 
row in set (0.00 sec) #同理配置從節點2即28主機 [root@slave2 ~]# vim /etc/my.cnf [mysqld] server-id=3 log-bin=/data/mysql/mysql-bin read-only relay_log_purge=0 skip_name_resolve=1 [root@slave2 ~]# service mysqld restart Shutting down MySQL.. SUCCESS! Starting MySQL. SUCCESS! [root@slave2 ~]# mysql -pmagedu mysql> CHANGE MASTER TO -> MASTER_HOST='10.0.0.8', -> MASTER_USER='repluser', -> MASTER_PASSWORD='magedu', -> MASTER_LOG_FILE='mysql-bin.000001', -> MASTER_LOG_POS=154; Query OK, 0 rows affected, 2 warnings (0.01 sec) mysql> start slave; Query OK, 0 rows affected (0.00 sec) mysql> show slave status\G *************************** 1. row *************************** Slave_IO_State: Waiting for master to send event Master_Host: 10.0.0.8 Master_User: repluser Master_Port: 3306 Connect_Retry: 60 Master_Log_File: mysql-bin.000001 Read_Master_Log_Pos: 738 Relay_Log_File: slave2-relay-bin.000002 Relay_Log_Pos: 904 Relay_Master_Log_File: mysql-bin.000001 Slave_IO_Running: Yes Slave_SQL_Running: Yes Replicate_Do_DB: Replicate_Ignore_DB: Replicate_Do_Table: Replicate_Ignore_Table: Replicate_Wild_Do_Table: Replicate_Wild_Ignore_Table: Last_Errno: 0 Last_Error: Skip_Counter: 0 Exec_Master_Log_Pos: 738 Relay_Log_Space: 1112 Until_Condition: None Until_Log_File: Until_Log_Pos: 0 Master_SSL_Allowed: No Master_SSL_CA_File: Master_SSL_CA_Path: Master_SSL_Cert: Master_SSL_Cipher: Master_SSL_Key: Seconds_Behind_Master: 0 Master_SSL_Verify_Server_Cert: No Last_IO_Errno: 0 Last_IO_Error: Last_SQL_Errno: 0 Last_SQL_Error: Replicate_Ignore_Server_Ids: Master_Server_Id: 1 Master_UUID: 7cf1fff9-0ede-11eb-a71e-000c2915e43d Master_Info_File: /data/mysql/master.info SQL_Delay: 0 SQL_Remaining_Delay: NULL Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates Master_Retry_Count: 86400 Master_Bind: Last_IO_Error_Timestamp: Last_SQL_Error_Timestamp: Master_SSL_Crl: Master_SSL_Crlpath: Retrieved_Gtid_Set: Executed_Gtid_Set: Auto_Position: 0 Replicate_Rewrite_DB: Channel_Name: 
Master_TLS_Version: 1 row in set (0.00 sec) #測試是否完成主從複製 #在主伺服器8主機上建立資料庫db1 mysql> create database db1; Query OK, 1 row affected (0.00 sec) #檢視從伺服器即18,28主機是否實現同步 #18主機 mysql> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | db1 | | mysql | | performance_schema | | sys | +--------------------+ 5 rows in set (0.00 sec) #28主機 mysql> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | db1 | | mysql | | performance_schema | | sys | +--------------------+ 5 rows in set (0.00 sec)
8.檢查mha環境
#檢查環境 #在mha伺服器上執行以下命令 [root@mha-manager ~]# masterha_check_ssh --conf=/etc/mastermha/app1.cnf #檢測ssh服務 [root@mha-manager ~]# masterha_check_repl --conf=/etc/mastermha/app1.cnf #檢視狀態 [root@mha-manager ~]# masterha_check_status --conf=/etc/mastermha/app1.cnf app1 is stopped(2:NOT_RUNNING). #其中出現報錯 Sat Oct 17 11:42:59 2020 - [error][/usr/share/perl5/vendor_perl/MHA/ManagerUtil.pm, ln122] Got error when getting node version. Error: Sat Oct 17 11:42:59 2020 - [error][/usr/share/perl5/vendor_perl/MHA/ManagerUtil.pm, ln123] bash: apply_diff_relay_logs: command not found Sat Oct 17 11:42:59 2020 - [error][/usr/share/perl5/vendor_perl/MHA/MasterMonitor.pm, ln135] Failed to get MHA node version on the current master even though current master is reachable via SSH! Sat Oct 17 11:42:59 2020 - [error][/usr/share/perl5/vendor_perl/MHA/MasterMonitor.pm, ln427] Error happened on checking configurations. at /usr/bin/masterha_check_repl line 48. Sat Oct 17 11:42:59 2020 - [error][/usr/share/perl5/vendor_perl/MHA/MasterMonitor.pm, ln525] Error happened on monitoring servers. Sat Oct 17 11:42:59 2020 - [info] Got exit code 1 (Not master dead). MySQL Replication Health is NOT OK! #原因為主伺服器沒有安裝node包,在master伺服器上安裝node包 #再次進行檢測顯示檢測成功 [root@mha-manager ~]# masterha_check_repl --conf=/etc/mastermha/app1.cnf MySQL Replication Health is OK.
9.啟動mha
#開啟mha,預設是前臺執行 [root@mha-manager ~]# masterha_manager --conf=/etc/mastermha/app1.cnf Thu Oct 15 20:47:48 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping. Thu Oct 15 20:47:48 2020 - [info] Reading application default configuration from /etc/mastermha/app1.cnf.. Thu Oct 15 20:47:48 2020 - [info] Reading server configuration from /etc/mastermha/app1.cnf.. #開啟後臺執行的方法: [root@mha-manager ~]# nohup masterha_manager --conf=/etc/mastermha/app1.cnf &> /dev/null #再次檢視狀態 [root@mha-manager ~]# masterha_check_status --conf=/etc/mastermha/app1.cnf app1 (pid:3195) is running(0:PING_OK), master:10.0.0.8
10.測試mha功能
#在執行mha時,當主伺服器down掉之後,從伺服器會上臺成為新的主節點,同時程式會退出,即一次性任務 #檢視日誌服務 [root@mha-manager ~]# tail /data/mastermha/app1/manager.log -w timeout : how long to wait for a reply -I device : which ethernet device to use -s source : source ip address destination : ask for what ip address Thu Oct 15 20:47:53 2020 - [info] OK. Thu Oct 15 20:47:53 2020 - [warning] shutdown_script is not defined. Thu Oct 15 20:47:53 2020 - [info] Set master ping interval 1 seconds. Thu Oct 15 20:47:53 2020 - [warning] secondary_check_script is not defined. It is highly recommended setting it to check master reachability from two or more routes. Thu Oct 15 20:47:53 2020 - [info] Starting ping health check on 10.0.0.8(10.0.0.8:3306).. Thu Oct 15 20:47:53 2020 - [info] Ping(SELECT) succeeded, waiting until MySQL doesn't respond.. #持續ping10.0.0.8伺服器監控mha主伺服器 #在主服務檢視通用日誌,可檢視健康性檢查 [root@master ~]# tail /data/mysql/master.log 2020-10-15T13:36:14.633551Z 11 Query SELECT 1 As Value 2020-10-15T13:36:15.634506Z 11 Query SELECT 1 As Value 2020-10-15T13:36:16.636821Z 11 Query SELECT 1 As Value 2020-10-15T13:36:17.637874Z 11 Query SELECT 1 As Value 2020-10-15T13:36:18.637664Z 11 Query SELECT 1 As Value 2020-10-15T13:36:19.638769Z 11 Query SELECT 1 As Value 2020-10-15T13:36:20.639872Z 11 Query SELECT 1 As Value #從伺服器在配置檔案中開啟通用日誌選項同樣可以實現健康性檢查
11.模擬故障實現主從遷移
#當master down機之後,mha會自動退出,屬於一次性任務 #將master伺服器進行斷電源作為down機 [root@mha-manager ~]# masterha_manager --conf=/etc/mastermha/app1.cnf Fri Oct 16 20:03:58 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping. Fri Oct 16 20:03:58 2020 - [info] Reading application default configuration from /etc/mastermha/app1.cnf.. Fri Oct 16 20:03:58 2020 - [info] Reading server configuration from /etc/mastermha/app1.cnf.. Fri Oct 16 20:06:07 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping. Fri Oct 16 20:06:07 2020 - [info] Reading application default configuration from /etc/mastermha/app1.cnf.. Fri Oct 16 20:06:07 2020 - [info] Reading server configuration from /etc/mastermha/app1.cnf.. [root@mha-manager ~]# #mha自動退出 #檢視日誌資訊 [root@mha-manager ~]# cat /data/mastermha/app1/manager.log Sat Oct 17 12:10:32 2020 - [info] Resetting slave info on the new master.. Sat Oct 17 12:10:32 2020 - [info] 10.0.0.28: Resetting slave info succeeded. Sat Oct 17 12:10:32 2020 - [info] Master failover to 10.0.0.28(10.0.0.28:3306) completed successfully. Sat Oct 17 12:10:32 2020 - [info] ----- Failover Report ----- app1: MySQL Master failover 10.0.0.8(10.0.0.8:3306) to 10.0.0.28(10.0.0.28:3306) succeeded Master 10.0.0.8(10.0.0.8:3306) is down! Check MHA Manager logs at mha-manager:/data/mastermha/app1/manager.log for details. Started automated(non-interactive) failover. Invalidated master IP address on 10.0.0.8(10.0.0.8:3306) The latest slave 10.0.0.18(10.0.0.18:3306) has all relay logs for recovery. Selected 10.0.0.28(10.0.0.28:3306) as a new master. 10.0.0.28(10.0.0.28:3306): OK: Applying all logs succeeded. 10.0.0.28(10.0.0.28:3306): OK: Activated master IP address. 10.0.0.18(10.0.0.18:3306): This host has the latest relay log events. Generating relay diff files from the latest slave succeeded. 10.0.0.18(10.0.0.18:3306): OK: Applying all logs succeeded. 
Slave started, replicating from 10.0.0.28(10.0.0.28:3306) 10.0.0.28(10.0.0.28:3306): Resetting slave info succeeded. Master failover to 10.0.0.28(10.0.0.28:3306) completed successfully. Sat Oct 17 12:10:32 2020 - [info] Sending mail.. #master伺服器不再有vip地址 [root@master ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:15:e4:3d brd ff:ff:ff:ff:ff:ff inet 10.0.0.8/24 brd 10.0.0.255 scope global noprefixroute eth0 valid_lft forever preferred_lft forever inet6 fe80::20c:29ff:fe15:e43d/64 scope link valid_lft forever preferred_lft forever #vip地址漂移到slave2伺服器上 [root@slave2 ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:ac:4b:1b brd ff:ff:ff:ff:ff:ff inet 10.0.0.28/24 brd 10.0.0.255 scope global noprefixroute eth0 valid_lft forever preferred_lft forever inet6 fe80::20c:29ff:feac:4b1b/64 scope link valid_lft forever preferred_lft forever
12.若要再次執行mha,需要刪除下方檔案
[root@mha-manager ~]# ls /data/mastermha/app1/app1.failover.complete -l -rw-r--r-- 1 root root 0 Oct 17 12:10 /data/mastermha/app1/app1.failover.complete [root@mha-manager ~]# rm -f /data/mastermha/app1/app1.failover.complete
4、實戰案例:PerconaXtraDBCluster(PXC5.7)
1.環境準備
#注意:PXC 5.7目前不支援CentOS 8,需使用CentOS 7作業系統
[root@pxc1 ~]# cat /etc/redhat-release
CentOS Linux release 7.7.1908 (Core)
#四臺主機
pxc1:10.0.0.7
pxc2:10.0.0.17
pxc3:10.0.0.27
pxc4:10.0.0.37
#關閉防火牆和selinux,保證時間同步
#注意:如果已經安裝了mysql,必須解除安裝
2.安裝PerconaXtraDBCluster
#使用清華大學的yum源 [root@pxc1 ~]# vim /etc/yum.repos.d/pxc.repo [percona] name=percona_repo baseurl= https://mirrors.tuna.tsinghua.edu.cn/percona/release/$releasever/RPMS/$basearch enabled = 1 gpgcheck = 0 [root@pxc1 ~]# scp /etc/yum.repos.d/pxc.repo 10.0.0.17:/etc/yum.repos.d/ The authenticity of host '10.0.0.17 (10.0.0.17)' can't be established. ECDSA key fingerprint is SHA256:nrjyrvmetwTeaxl0k3UYKfIjIMN6hnGWrchgCuCSSDA. ECDSA key fingerprint is MD5:d9:f4:02:e9:e2:88:1c:f1:c7:cb:c7:c6:ac:7e:56:9a. Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added '10.0.0.17' (ECDSA) to the list of known hosts. root@10.0.0.17's password: pxc.repo 100% 150 153.4KB/s 00:00 [root@pxc1 ~]# scp /etc/yum.repos.d/pxc.repo 10.0.0.27:/etc/yum.repos.d/ The authenticity of host '10.0.0.27 (10.0.0.27)' can't be established. ECDSA key fingerprint is SHA256:Wd51d+v6/JEz4dmV09QW1rwe3rdmk1IbggniV2SyQSs. ECDSA key fingerprint is MD5:47:97:07:2a:cf:b7:2d:cb:3a:49:82:c3:ea:51:2b:a5. Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added '10.0.0.27' (ECDSA) to the list of known hosts. root@10.0.0.27's password: pxc.repo 100% 150 57.9KB/s 00:00 #在三個節點都安裝pxc5.7 [root@pxc1 ~]# yum -y install Percona-XtraDB-Cluster-57 [root@pxc2 ~]# yum -y install Percona-XtraDB-Cluster-57 [root@pxc3 ~]# yum -y install Percona-XtraDB-Cluster-57
3.在各個節點配置mysql叢集配置檔案
#下面的配置檔案修改節點編號 [root@pxc1 ~]# cat /etc/percona-xtradb-cluster.conf.d/mysqld.cnf # Template my.cnf for PXC # Edit to your requirements. [client] socket=/var/lib/mysql/mysql.sock [mysqld] server-id=1 #各個節點不要相同 datadir=/var/lib/mysql socket=/var/lib/mysql/mysql.sock log-error=/var/log/mysqld.log pid-file=/var/run/mysqld/mysqld.pid log-bin #建議啟用二進位制日誌,非必須向 log_slave_updates expire_logs_days=7 # Disabling symbolic-links is recommended to prevent assorted security risks symbolic-links=0 [root@pxc2 ~]# vim /etc/percona-xtradb-cluster.conf.d/mysqld.cnf [mysqld] server-id=2 [root@pxc3 ~]# vim /etc/percona-xtradb-cluster.conf.d/mysqld.cnf [mysqld] server-id=3 [root@pxc1 ~]# vim /etc/percona-xtradb-cluster.conf.d/wsrep.cnf wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 wsrep_node_address=10.0.0.7 wsrep_sst_auth="sstuser:s3cretPass" #將本行註釋取消 [root@pxc1 ~]# grep -Ev "^#|^$" /etc/percona-xtradb-cluster.conf.d/wsrep.cnf [mysqld] wsrep_provider=/usr/lib64/galera3/libgalera_smm.so wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 #三個節點的IP binlog_format=ROW default_storage_engine=InnoDB wsrep_slave_threads= 8 wsrep_log_conflicts innodb_autoinc_lock_mode=2 wsrep_node_address=10.0.0.7 #各個節點,指定自己的IP wsrep_cluster_name=pxc-cluster wsrep_node_name=pxc-cluster-node-1 #各個節點,指定自己得節點名稱 pxc_strict_mode=ENFORCING wsrep_sst_method=xtrabackup-v2 wsrep_sst_auth="sstuser:s3cretPass" #取消本行註釋 [root@pxc2 ~]# grep -Ev "^#|^$" /etc/percona-xtradb-cluster.conf.d/wsrep.cnf [mysqld] wsrep_provider=/usr/lib64/galera3/libgalera_smm.so wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 binlog_format=ROW default_storage_engine=InnoDB wsrep_slave_threads= 8 wsrep_log_conflicts innodb_autoinc_lock_mode=2 wsrep_cluster_name=pxc-cluster wsrep_node_name=pxc-cluster-node-2 pxc_strict_mode=ENFORCING wsrep_sst_method=xtrabackup-v2 wsrep_sst_auth="sstuser:s3cretPass" [root@pxc3 ~]# grep -Ev "^#|^$" /etc/percona-xtradb-cluster.conf.d/wsrep.cnf [mysqld] 
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 binlog_format=ROW default_storage_engine=InnoDB wsrep_slave_threads= 8 wsrep_log_conflicts innodb_autoinc_lock_mode=2 wsrep_node_address=10.0.0.27 wsrep_cluster_name=pxc-cluster wsrep_node_name=pxc-cluster-node-3 pxc_strict_mode=ENFORCING wsrep_sst_method=xtrabackup-v2 wsrep_sst_auth="sstuser:s3cretPass" #注意:儘管Galera Cluster不再需要通過binlog的形式進行同步,但還是建議在配置檔案中開啟二進位制日誌功能,原因是後期如果有新節點需要加入,老節點通過SST全量傳輸的方式向新節點傳輸資料,很可能會拖垮叢集效能,所以讓新節點先通過binlog方式完成同步後再加入叢集會是一種更好的選擇
4. 啟動 PXC 叢集中的第一個節點
[root@pxc1 ~]# ss -ntlu Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port tcp LISTEN 0 100 127.0.0.1:25 *:* tcp LISTEN 0 128 *:22 *:* tcp LISTEN 0 100 [::1]:25 [::]:* tcp LISTEN 0 128 [::]:22 [::]:* #啟動第一個節點 [root@pxc1 ~]# systemctl start [email protected] [root@pxc1 ~]# ss -ntul Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port tcp LISTEN 0 128 *:4567 *:* tcp LISTEN 0 100 127.0.0.1:25 *:* tcp LISTEN 0 128 *:22 *:* tcp LISTEN 0 100 [::1]:25 [::]:* tcp LISTEN 0 80 [::]:3306 [::]:* tcp LISTEN 0 128 [::]:22 [::]:* #檢視root密碼 [root@pxc1 ~]# grep "temporary password" /var/log/mysqld.log 2020-10-17T06:55:55.852483Z 1 [Note] A temporary password is generated for root@localhost: sumajCl/G7LH #用隨機一次性密碼登入 [root@pxc1 ~]# mysql -uroot -p'sumajCl/G7LH' #登陸之後修改密碼 mysql> alter user 'root'@'localhost' identified by 'magedu'; Query OK, 0 rows affected (0.00 sec) #建立相關使用者並授權 mysql> create user 'sstuser'@'localhost' identified by 's3cretPass'; Query OK, 0 rows affected (0.01 sec) mysql> grant reload, lock tables, process, replication client on *.* to 'sstuser'@'localhost'; Query OK, 0 rows affected (0.01 sec) #檢視相關變數 mysql> show variables like 'wsrep%'\G *************************** 1. row *************************** Variable_name: wsrep_OSU_method Value: TOI *************************** 2. row *************************** Variable_name: wsrep_RSU_commit_timeout Value: 5000 *************************** 3. row *************************** Variable_name: wsrep_auto_increment_control Value: ON *************************** 4. row *************************** Variable_name: wsrep_causal_reads Value: OFF *************************** 5. row *************************** Variable_name: wsrep_certification_rules Value: strict *************************** 6. row *************************** Variable_name: wsrep_certify_nonPK Value: ON *************************** 7. 
row *************************** Variable_name: wsrep_cluster_address Value: gcomm://10.0.0.7,10.0.0.17,10.0.0.27 *************************** 8. row *************************** Variable_name: wsrep_cluster_name Value: pxc-cluster *************************** 9. row *************************** Variable_name: wsrep_convert_LOCK_to_trx Value: OFF *************************** 10. row *************************** Variable_name: wsrep_data_home_dir Value: /var/lib/mysql/ *************************** 11. row *************************** Variable_name: wsrep_dbug_option Value: *************************** 12. row *************************** Variable_name: wsrep_debug Value: OFF *************************** 13. row *************************** Variable_name: wsrep_desync Value: OFF *************************** 14. row *************************** Variable_name: wsrep_dirty_reads Value: OFF *************************** 15. row *************************** Variable_name: wsrep_drupal_282555_workaround Value: OFF *************************** 16. row *************************** Variable_name: wsrep_forced_binlog_format Value: NONE *************************** 17. row *************************** Variable_name: wsrep_load_data_splitting Value: ON *************************** 18. row *************************** Variable_name: wsrep_log_conflicts Value: ON *************************** 19. row *************************** Variable_name: wsrep_max_ws_rows Value: 0 *************************** 20. row *************************** Variable_name: wsrep_max_ws_size Value: 2147483647 *************************** 21. row *************************** Variable_name: wsrep_node_address Value: 10.0.0.7 *************************** 22. row *************************** Variable_name: wsrep_node_incoming_address Value: AUTO *************************** 23. row *************************** Variable_name: wsrep_node_name Value: pxc-cluster-node-1 *************************** 24. 
row *************************** Variable_name: wsrep_notify_cmd Value: *************************** 25. row *************************** Variable_name: wsrep_on Value: ON *************************** 26. row *************************** Variable_name: wsrep_preordered Value: OFF *************************** 27. row *************************** Variable_name: wsrep_provider Value: /usr/lib64/galera3/libgalera_smm.so *************************** 28. row *************************** Variable_name: wsrep_provider_options Value: base_dir = /var/lib/mysql/; base_host = 10.0.0.7; base_port = 4567; cert.log_conflicts = no; cert.optimistic_pa = yes; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 10; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 4; evs.version = 0; evs.view_forget_timeout = P1D; gcache.dir = /var/lib/mysql/; gcache.freeze_purge_at_seqno = -1; gcache.keep_pages_count = 0; gcache.keep_pages_size = 0; gcache.mem_size = 0; gcache.name = /var/lib/mysql//galera.cache; gcache.page_size = 128M; gcache.recover = no; gcache.size = 128M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1; gcs.fc_limit = 100; gcs.fc_master_slave = no; gcs *************************** 29. row *************************** Variable_name: wsrep_recover Value: OFF *************************** 30. row *************************** Variable_name: wsrep_reject_queries Value: NONE *************************** 31. row *************************** Variable_name: wsrep_replicate_myisam Value: OFF *************************** 32. 
row *************************** Variable_name: wsrep_restart_slave Value: OFF *************************** 33. row *************************** Variable_name: wsrep_retry_autocommit Value: 1 *************************** 34. row *************************** Variable_name: wsrep_slave_FK_checks Value: ON *************************** 35. row *************************** Variable_name: wsrep_slave_UK_checks Value: OFF *************************** 36. row *************************** Variable_name: wsrep_slave_threads Value: 8 *************************** 37. row *************************** Variable_name: wsrep_sst_auth Value: ******** *************************** 38. row *************************** Variable_name: wsrep_sst_donor Value: *************************** 39. row *************************** Variable_name: wsrep_sst_donor_rejects_queries Value: OFF *************************** 40. row *************************** Variable_name: wsrep_sst_method Value: xtrabackup-v2 *************************** 41. row *************************** Variable_name: wsrep_sst_receive_address Value: AUTO *************************** 42. row *************************** Variable_name: wsrep_start_position Value: 00000000-0000-0000-0000-000000000000:-1 *************************** 43. row *************************** Variable_name: wsrep_sync_wait Value: 0 43 rows in set (0.00 sec) #檢視相關狀態變數 mysql> show status like 'wsrep%'\G *************************** 1. row *************************** Variable_name: wsrep_local_state_uuid Value: d2b19083-1045-11eb-a093-d33276afa751 *************************** 2. row *************************** Variable_name: wsrep_protocol_version Value: 9 *************************** 3. row *************************** Variable_name: wsrep_last_applied Value: 3 *************************** 4. row *************************** Variable_name: wsrep_last_committed Value: 3 *************************** 5. 
row *************************** Variable_name: wsrep_replicated Value: 3 *************************** 6. row *************************** Variable_name: wsrep_replicated_bytes Value: 760 *************************** 7. row *************************** Variable_name: wsrep_repl_keys Value: 3 *************************** 8. row *************************** Variable_name: wsrep_repl_keys_bytes Value: 96 *************************** 9. row *************************** Variable_name: wsrep_repl_data_bytes Value: 459 *************************** 10. row *************************** Variable_name: wsrep_repl_other_bytes Value: 0 *************************** 11. row *************************** Variable_name: wsrep_received Value: 2 *************************** 12. row *************************** Variable_name: wsrep_received_bytes Value: 150 *************************** 13. row *************************** Variable_name: wsrep_local_commits Value: 0 *************************** 14. row *************************** Variable_name: wsrep_local_cert_failures Value: 0 *************************** 15. row *************************** Variable_name: wsrep_local_replays Value: 0 *************************** 16. row *************************** Variable_name: wsrep_local_send_queue Value: 0 *************************** 17. row *************************** Variable_name: wsrep_local_send_queue_max Value: 1 *************************** 18. row *************************** Variable_name: wsrep_local_send_queue_min Value: 0 *************************** 19. row *************************** Variable_name: wsrep_local_send_queue_avg Value: 0.000000 *************************** 20. row *************************** Variable_name: wsrep_local_recv_queue Value: 0 *************************** 21. row *************************** Variable_name: wsrep_local_recv_queue_max Value: 2 *************************** 22. row *************************** Variable_name: wsrep_local_recv_queue_min Value: 0 *************************** 23. 
row *************************** Variable_name: wsrep_local_recv_queue_avg Value: 0.500000 *************************** 24. row *************************** Variable_name: wsrep_local_cached_downto Value: 1 *************************** 25. row *************************** Variable_name: wsrep_flow_control_paused_ns Value: 0 *************************** 26. row *************************** Variable_name: wsrep_flow_control_paused Value: 0.000000 *************************** 27. row *************************** Variable_name: wsrep_flow_control_sent Value: 0 *************************** 28. row *************************** Variable_name: wsrep_flow_control_recv Value: 0 *************************** 29. row *************************** Variable_name: wsrep_flow_control_interval Value: [ 100, 100 ] *************************** 30. row *************************** Variable_name: wsrep_flow_control_interval_low Value: 100 *************************** 31. row *************************** Variable_name: wsrep_flow_control_interval_high Value: 100 *************************** 32. row *************************** Variable_name: wsrep_flow_control_status Value: OFF *************************** 33. row *************************** Variable_name: wsrep_cert_deps_distance Value: 1.000000 *************************** 34. row *************************** Variable_name: wsrep_apply_oooe Value: 0.000000 *************************** 35. row *************************** Variable_name: wsrep_apply_oool Value: 0.000000 *************************** 36. row *************************** Variable_name: wsrep_apply_window Value: 1.000000 *************************** 37. row *************************** Variable_name: wsrep_commit_oooe Value: 0.000000 *************************** 38. row *************************** Variable_name: wsrep_commit_oool Value: 0.000000 *************************** 39. row *************************** Variable_name: wsrep_commit_window Value: 1.000000 *************************** 40. 
row *************************** Variable_name: wsrep_local_state Value: 4 *************************** 41. row *************************** Variable_name: wsrep_local_state_comment Value: Synced *************************** 42. row *************************** Variable_name: wsrep_cert_index_size Value: 1 *************************** 43. row *************************** Variable_name: wsrep_cert_bucket_count Value: 22 *************************** 44. row *************************** Variable_name: wsrep_gcache_pool_size Value: 2200 *************************** 45. row *************************** Variable_name: wsrep_causal_reads Value: 0 *************************** 46. row *************************** Variable_name: wsrep_cert_interval Value: 0.000000 *************************** 47. row *************************** Variable_name: wsrep_open_transactions Value: 0 *************************** 48. row *************************** Variable_name: wsrep_open_connections Value: 0 *************************** 49. row *************************** Variable_name: wsrep_ist_receive_status Value: *************************** 50. row *************************** Variable_name: wsrep_ist_receive_seqno_start Value: 0 *************************** 51. row *************************** Variable_name: wsrep_ist_receive_seqno_current Value: 0 *************************** 52. row *************************** Variable_name: wsrep_ist_receive_seqno_end Value: 0 *************************** 53. row *************************** Variable_name: wsrep_incoming_addresses Value: 10.0.0.7:3306 *************************** 54. row *************************** Variable_name: wsrep_cluster_weight Value: 1 *************************** 55. row *************************** Variable_name: wsrep_desync_count Value: 0 *************************** 56. row *************************** Variable_name: wsrep_evs_delayed Value: *************************** 57. 
row *************************** Variable_name: wsrep_evs_evict_list Value: *************************** 58. row *************************** Variable_name: wsrep_evs_repl_latency Value: 0/0/0/0/0 *************************** 59. row *************************** Variable_name: wsrep_evs_state Value: OPERATIONAL *************************** 60. row *************************** Variable_name: wsrep_gcomm_uuid Value: d2b120b0-1045-11eb-a6db-46ea8c26d5e8 *************************** 61. row *************************** Variable_name: wsrep_cluster_conf_id Value: 1 *************************** 62. row *************************** Variable_name: wsrep_cluster_size Value: 1 *************************** 63. row *************************** Variable_name: wsrep_cluster_state_uuid Value: d2b19083-1045-11eb-a093-d33276afa751 *************************** 64. row *************************** Variable_name: wsrep_cluster_status Value: Primary *************************** 65. row *************************** Variable_name: wsrep_connected Value: ON *************************** 66. row *************************** Variable_name: wsrep_local_bf_aborts Value: 0 *************************** 67. row *************************** Variable_name: wsrep_local_index Value: 0 *************************** 68. row *************************** Variable_name: wsrep_provider_name Value: Galera *************************** 69. row *************************** Variable_name: wsrep_provider_vendor Value: Codership Oy <info@codership.com> *************************** 70. row *************************** Variable_name: wsrep_provider_version Value: 3.45(ra60e019) *************************** 71. 
row *************************** Variable_name: wsrep_ready Value: ON 71 rows in set (0.00 sec) #檢視狀態,重點關注 mysql> show status like 'wsrep%'; +----------------------------------+--------------------------------------+ | Variable_name | Value | +----------------------------------+--------------------------------------+ | wsrep_local_state_uuid | d2b19083-1045-11eb-a093-d33276afa751 | | wsrep_protocol_version | 9 | | wsrep_last_applied | 3 | | wsrep_last_committed | 3 | | wsrep_replicated | 3 | | wsrep_replicated_bytes | 760 | | wsrep_repl_keys | 3 | | wsrep_repl_keys_bytes | 96 | | wsrep_repl_data_bytes | 459 | | wsrep_repl_other_bytes | 0 | | wsrep_received | 2 | | wsrep_received_bytes | 150 | | wsrep_local_commits | 0 | | wsrep_local_cert_failures | 0 | | wsrep_local_replays | 0 | | wsrep_local_send_queue | 0 | | wsrep_local_send_queue_max | 1 | | wsrep_local_send_queue_min | 0 | | wsrep_local_send_queue_avg | 0.000000 | | wsrep_local_recv_queue | 0 | | wsrep_local_recv_queue_max | 2 | | wsrep_local_recv_queue_min | 0 | | wsrep_local_recv_queue_avg | 0.500000 | | wsrep_local_cached_downto | 1 | | wsrep_flow_control_paused_ns | 0 | | wsrep_flow_control_paused | 0.000000 | | wsrep_flow_control_sent | 0 | | wsrep_flow_control_recv | 0 | | wsrep_flow_control_interval | [ 100, 100 ] | | wsrep_flow_control_interval_low | 100 | | wsrep_flow_control_interval_high | 100 | | wsrep_flow_control_status | OFF | | wsrep_cert_deps_distance | 1.000000 | | wsrep_apply_oooe | 0.000000 | | wsrep_apply_oool | 0.000000 | | wsrep_apply_window | 1.000000 | | wsrep_commit_oooe | 0.000000 | | wsrep_commit_oool | 0.000000 | | wsrep_commit_window | 1.000000 | | wsrep_local_state | 4 |#資料同步完成 | wsrep_local_state_comment | Synced | | wsrep_cert_index_size | 1 |#該叢集只有1個節點 | wsrep_cert_bucket_count | 22 | | wsrep_gcache_pool_size | 2200 | | wsrep_causal_reads | 0 | | wsrep_cert_interval | 0.000000 | | wsrep_open_transactions | 0 | | wsrep_open_connections | 0 | | wsrep_ist_receive_status | | | 
wsrep_ist_receive_seqno_start | 0 | | wsrep_ist_receive_seqno_current | 0 | | wsrep_ist_receive_seqno_end | 0 | | wsrep_incoming_addresses | 10.0.0.7:3306 | | wsrep_cluster_weight | 1 | | wsrep_desync_count | 0 | | wsrep_evs_delayed | | | wsrep_evs_evict_list | | | wsrep_evs_repl_latency | 0/0/0/0/0 | | wsrep_evs_state | OPERATIONAL | | wsrep_gcomm_uuid | d2b120b0-1045-11eb-a6db-46ea8c26d5e8 | | wsrep_cluster_conf_id | 1 | | wsrep_cluster_size | 1 | | wsrep_cluster_state_uuid | d2b19083-1045-11eb-a093-d33276afa751 | | wsrep_cluster_status | Primary | #完全連線狀態 | wsrep_connected | ON | | wsrep_local_bf_aborts | 0 | | wsrep_local_index | 0 | | wsrep_provider_name | Galera | | wsrep_provider_vendor | Codership Oy <info@codership.com> | | wsrep_provider_version | 3.45(ra60e019) | | wsrep_ready | ON | +----------------------------------+--------------------------------------+ 71 rows in set (0.00 sec) #說明: #wsrep_cluster_size表示,該Galera叢集中只有一個節點 #wsrep_local_state_comment 狀態為Synced(4),表示資料已同步完成(因為是第一個引導節點,無資料需要同步)。 如果狀態是Joiner, 意味著 SST 沒有完成. 只有所有節點狀態是Synced,才可以加新節點 #wsrep_cluster_status為Primary,且已經完全連線並準備好
5.啟用叢集其他所有節點
[root@pxc2 ~]# ss -ntul Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port tcp LISTEN 0 100 127.0.0.1:25 *:* tcp LISTEN 0 128 *:22 *:* tcp LISTEN 0 100 [::1]:25 [::]:* tcp LISTEN 0 128 [::]:22 [::]:* [root@pxc2 ~]# systemctl start mysql [root@pxc2 ~]# ss -ntlu Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port tcp LISTEN 0 128 *:4567 *:* tcp LISTEN 0 100 127.0.0.1:25 *:* tcp LISTEN 0 128 *:22 *:* tcp LISTEN 0 100 [::1]:25 [::]:* tcp LISTEN 0 80 [::]:3306 [::]:* tcp LISTEN 0 128 [::]:22 [::]:* [root@pxc3 ~]# systemctl start mysql
5、通過 Ansible 部署二進位制 MySQL 8
#實現部署需要準備的檔案 [root@centos8 ~]# ls anaconda-ks.cfg install_mysql.yml my.cnf mysql-8.0.21-linux-glibc2.12-x86_64.tar.xz ssh_key.sh #通過epel源下載ansible [root@centos8 ~]#yum -y install ansible #編輯配置檔案 [root@centos8 ~]# vim /etc/ansible/ansible.cfg log_path = /var/log/ansible.log #取消註釋,啟用日誌 [root@centos8 ~]# vim /etc/ansible/hosts [dbsrvs] 10.0.0.8 ansible_connection=local 10.0.0.18 10.0.0.7 #實現基於key驗證 #ansible的實現基於ssh連線,可通過基於key驗證避免每次執行時的詢問 [root@centos8 ~]# vim ssh_key.sh #!/bin/bash IPLIST=" 10.0.0.8 10.0.0.18 10.0.0.7" rpm -q sshpass &> /dev/null || yum -y install sshpass [ -f /root/.ssh/id_rsa ]|| ssh-keygen -f /root/.ssh/id_rsa -P '' export SSHPASS=1123 for IP in $IPLIST;do sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $IP done [root@centos8 ~]# bash ssh_key.sh #執行完指令碼克通過ssh遠端連線主機進行測試是否實現基於key驗證 #準備配置檔案my.cnf [root@centos8 ~]# vim my.cnf [mysqld] socket=/tmp/mysql.sock user=mysql symbolic-links=0 datadir=/data/mysql innodb_file_per_table=1 log-bin pid-file=/data/mysql/mysqld.pid [client] port=3306 socket=/tmp/mysql.sock [mysqld_safe] log-error=/data/mysql/mysqld.log #編輯playbook檔案實現ansible完成部署二進位制資料庫 [root@centos8 ~]# vim install_mysql.yml --- # install mysql-8.0.21-linux-glibc2.12-x86_64.tar.xz - hosts: dbsrvs remote_user: root gather_facts: no tasks: - name: 下載安裝包 yum: name=libaio,numactl-libs - name: 建立組 group: name=mysql gid=306 - name: 建立使用者 user: name=mysql uid=306 group=mysql shell=/sbin/nologin system=yes create_home=no home=/data/mysql - name: 對壓縮檔案解壓縮 unarchive: src=/root/mysql-8.0.21-linux-glibc2.12-x86_64.tar.xz dest=/usr/local/ owner=root group=root - name: 建立軟連線 file: src=/usr/local/mysql-8.0.21-linux-glibc2.12-x86_64 dest=/usr/local/mysql state=link - name: 新增變數 copy: content='PATH=/usr/local/mysql/bin:$PATH' dest=/etc/profile.d/mysql.sh - name: 執行變數 shell: source /etc/profile.d/mysql.sh - name: 拷貝配置檔案 copy: src=/root/my.cnf dest=/etc/my.cnf - name: 生成資料庫檔案 shell: /usr/local/mysql/bin/mysqld --initialize --user=mysql --datadir=/data/mysql - name: 
準備server檔案 shell: /bin/cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld - name: 啟動服務 shell: chkconfig --add mysqld;service mysqld restart
#檢測語法有沒有錯誤 [root@centos8 ~]# ansible-playbook install_mysql.yml --syntax-check playbook: install_mysql.yml [root@centos8 ~]# ansible-playbook install_mysql.yml --check #只檢測,不執行 #執行ansible [root@centos8 ~]# ansible-playbook install_mysql.yml #由於安裝資料庫使用的生成資料庫檔案是會生成隨機一次性口令,登入資料庫需要使用該口令,口令可通過檢視日誌檔案/data/mysql/mysql.log檔案檢視 #可通過mysqladmin修改口令密碼 mysqladmin -uroot -p'yEfKftgil1<e' password 1123