
Hadoop Framework High-Availability Configuration --- HA

1. High-Availability Cluster Setup

2. Prepare the Installation Environment

  tar -zxvf hadoop-3.1.2.tar.gz
  mv hadoop-3.1.2 /opt/
  cd /opt/hadoop-3.1.2/etc/hadoop/

3. Modify the Cluster Environment

  vim hadoop-env.sh
  # Append directly at the end of the file
  export JAVA_HOME=/usr/local/java/jdk1.8.0_261
  export HDFS_NAMENODE_USER=root
  export HDFS_DATANODE_USER=root
  export HDFS_ZKFC_USER=root
  export HDFS_JOURNALNODE_USER=root
  export YARN_RESOURCEMANAGER_USER=root
  export YARN_NODEMANAGER_USER=root
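
Before going further, it is worth confirming that the JAVA_HOME set above really points at a JDK; a minimal check, assuming the path from hadoop-env.sh:

  /usr/local/java/jdk1.8.0_261/bin/java -version  # should print a 1.8.0_261 version banner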

4. Modify the Configuration Files

  vim core-site.xml
  <!-- Logical name (nameservice ID) of the cluster -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://bdp</value>
  </property>
  <!-- Temporary directory on the local Linux filesystem -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/var/bdp/hadoop/ha</value>
  </property>
  <!-- User for the static web UI login -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
  </property>
  <!-- ZooKeeper quorum addresses used by HA -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node001:2181,node002:2181,node003:2181</value>
  </property>
  vim hdfs-site.xml
  <!-- Nameservice and NameNodes -->
  <property>
    <name>dfs.nameservices</name>
    <value>bdp</value>
  </property>

  <property>
    <name>dfs.ha.namenodes.bdp</name>
    <value>nn1,nn2</value>
  </property>

  <!-- RPC address and port of each NameNode -->
  <property>
    <name>dfs.namenode.rpc-address.bdp.nn1</name>
    <value>node001:8020</value>
  </property>

  <property>
    <name>dfs.namenode.rpc-address.bdp.nn2</name>
    <value>node002:8020</value>
  </property>

  <!-- HTTP address and port of each NameNode web UI -->
  <property>
    <name>dfs.namenode.http-address.bdp.nn1</name>
    <value>node001:9870</value>
  </property>

  <property>
    <name>dfs.namenode.http-address.bdp.nn2</name>
    <value>node002:9870</value>
  </property>

  <!-- JournalNodes -->
  <!-- URI where the NameNodes write the shared edit log -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node001:8485;node002:8485;node003:8485/bdp</value>
  </property>

  <!-- Local directory where each JournalNode stores the edits -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/var/bdp/hadoop/ha/qjm</value>
  </property>

  <!-- Failover -->
  <!-- Proxy class that clients use to locate the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.bdp</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

  <!-- Fencing methods, tried in order during failover; both belong in a single
       value element. sshfence needs passwordless SSH between the NameNode hosts;
       see the key setup sketch after this file. -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(true)
    </value>
  </property>

  <!-- SSH private key used by the sshfence method -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>

  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>

  <!-- Replication factor: keep 2 copies of each block -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
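
The sshfence method configured above requires that root can SSH between the NameNode hosts without a password (the scp distribution step below relies on this as well). A minimal key setup sketch, assuming keys have not been distributed yet:

  # Run on node001 and node002 (either NameNode may need to fence the other)
  ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa
  ssh-copy-id root@node001
  ssh-copy-id root@node002
  ssh-copy-id root@node003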
  [root@node001 hadoop]# vim workers
  node001
  node002
  node003

5. Copy and Distribute the Software

  # Distribute the configured software to the other hosts
  scp -r /opt/ root@node002:/opt/
  scp -r /opt/ root@node003:/opt/
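
A quick way to confirm the copy landed (a hedged check; any file under the tree will do):

  ssh root@node002 ls /opt/hadoop-3.1.2/etc/hadoop/hadoop-env.sh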

6. Modify the Environment Variables

  vim /etc/profile
  export HADOOP_HOME=/opt/hadoop-3.1.2
  export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
  # Copy the environment variables to the other hosts
  scp /etc/profile root@node002:/etc/profile
  scp /etc/profile root@node003:/etc/profile
  # Reload the environment variables on all three servers
  source /etc/profile
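
Once the variables are loaded on every node, the Hadoop commands should resolve and pick up the configuration; a quick check:

  hadoop version                      # first line should read Hadoop 3.1.2
  hdfs getconf -confKey fs.defaultFS  # should print hdfs://bdp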

7. Start ZooKeeper First

  # Run on node001, node002 and node003
  zkServer.sh start
  zkServer.sh status
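
On a healthy three-node ensemble, zkServer.sh status reports Mode: leader on exactly one node and Mode: follower on the other two; anything else means ZooKeeper is not ready and the HA steps below will fail:

  [root@node001]# zkServer.sh status   # Mode: follower (or leader on exactly one node)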

Start the JournalNodes

  # Run on all three nodes (node001, node002, node003)
  hdfs --daemon start journalnode
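
Each JournalNode should now appear as a Java process; a quick check on any of the three nodes:

  jps   # the output should include a JournalNode entry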

Format the NameNode

  [root@node001]# hdfs namenode -format
  # Start this NameNode so the standby can copy its metadata
  [root@node001]# hdfs --daemon start namenode
  [root@node002]# hdfs namenode -bootstrapStandby
  [root@node001]# hdfs zkfc -formatZK
  [root@node001]# start-dfs.sh
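
After start-dfs.sh, one NameNode should be active and the other standby. This can be verified from the shell (nn1 and nn2 are the IDs defined in hdfs-site.xml):

  hdfs haadmin -getServiceState nn1   # e.g. active
  hdfs haadmin -getServiceState nn2   # e.g. standby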

8. Test the Cluster

http://node001:9870
http://node002:9870

  [root@node001]# hdfs dfs -mkdir -p /lzj
  [root@node001]# hdfs dfs -put zookeeper-3.4.5.tar.gz /lzj/
  # Set the block size explicitly (1048576 bytes = 1 MB); -f overwrites the copy uploaded above
  [root@node001]# hdfs dfs -D dfs.blocksize=1048576 -put -f zookeeper-3.4.5.tar.gz /lzj/
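
To confirm the smaller block size took effect, fsck lists the blocks a file occupies; with dfs.blocksize=1048576 the tarball should be split into many 1 MB blocks rather than a single default-sized block:

  hdfs fsck /lzj/zookeeper-3.4.5.tar.gz -files -blocks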

9. Shut Down the Cluster

  [root@node001]# stop-dfs.sh
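
On this configuration, stop-dfs.sh stops the HDFS daemons (NameNodes, DataNodes, JournalNodes and ZKFCs); ZooKeeper is managed separately, so stop it on each of the three nodes as well:

  zkServer.sh stop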

10. Power Off and Take a Snapshot

  shutdown -h now

This article is from cnblogs, author: jsqup. Please credit the original link when reposting: https://www.cnblogs.com/jsqup/p/15975592.html