
Hadoop High Availability (HA) Setup
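
Cluster layout used in the configs below: three nodes, hadoop-zxkf-1/2/3, each running ZooKeeper (port 2181) and a JournalNode (port 8485). NameNodes nn1 and nn2 run on hadoop-zxkf-1 and hadoop-zxkf-3; ResourceManagers rm1 and rm2 run on hadoop-zxkf-2 and hadoop-zxkf-3. Hadoop 2.10.1 is installed under /home/hadoop/opt/hadoop-2.10.1.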

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
        <!-- Default filesystem: points at the HA nameservice defined in hdfs-site.xml -->
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://ostream-hdfs</value>
        </property>
        <!-- Base directory for Hadoop temporary and data files -->
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/home/hadoop/opt/hadoop-2.10.1/data/tmp</value>
        </property>
        <!-- Keep deleted files in the trash for 2 minutes -->
        <property>
                <name>fs.trash.interval</name>
                <value>2</value>
        </property>
        <!-- Create trash checkpoints every minute -->
        <property>
                <name>fs.trash.checkpoint.interval</name>
                <value>1</value>
        </property>
        <!-- Static user for the HDFS web UI -->
        <property>
                <name>hadoop.http.staticuser.user</name>
                <value>hadoop</value>
        </property>
        <!-- ZooKeeper quorum used for automatic NameNode failover (ZKFC) -->
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>hadoop-zxkf-1:2181,hadoop-zxkf-2:2181,hadoop-zxkf-3:2181</value>
        </property>
</configuration>
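
The same core-site.xml (and the hdfs-site.xml / yarn-site.xml shown below) must be present on all three nodes. A minimal sync sketch, assuming the configuration directory is /home/hadoop/opt/hadoop-2.10.1/etc/hadoop and passwordless SSH is set up for the hadoop user (both are assumptions based on the paths above):

# Run on hadoop-zxkf-1 as the hadoop user
CONF_DIR=/home/hadoop/opt/hadoop-2.10.1/etc/hadoop   # assumed config path
for host in hadoop-zxkf-2 hadoop-zxkf-3; do
    rsync -av "$CONF_DIR"/core-site.xml "$CONF_DIR"/hdfs-site.xml "$CONF_DIR"/yarn-site.xml \
        "$host:$CONF_DIR/"
done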

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
        <!-- Logical name (nameservice ID) of the fully distributed cluster -->
        <property>
                <name>dfs.nameservices</name>
                <value>ostream-hdfs</value>
        </property>
        <!-- NameNodes that belong to this nameservice -->
        <property>
                <name>dfs.ha.namenodes.ostream-hdfs</name>
                <value>nn1,nn2</value>
        </property>
        <!-- RPC address of nn1 -->
        <property>
                <name>dfs.namenode.rpc-address.ostream-hdfs.nn1</name>
                <value>hadoop-zxkf-1:8020</value>
        </property>
        <!-- RPC address of nn2 -->
        <property>
                <name>dfs.namenode.rpc-address.ostream-hdfs.nn2</name>
                <value>hadoop-zxkf-3:8020</value>
        </property>
        <!-- HTTP address of nn1 -->
        <property>
                <name>dfs.namenode.http-address.ostream-hdfs.nn1</name>
                <value>hadoop-zxkf-1:50070</value>
        </property>

        <!-- HTTP address of nn2 -->
        <property>
                <name>dfs.namenode.http-address.ostream-hdfs.nn2</name>
                <value>hadoop-zxkf-3:50070</value>
        </property>
        <!-- Where NameNode metadata (edit log) is stored on the JournalNodes -->
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://hadoop-zxkf-1:8485;hadoop-zxkf-2:8485;hadoop-zxkf-3:8485/ostream-hdfs</value>
        </property>
        <!-- Fencing method: ensures only one NameNode serves clients at any time -->
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>sshfence</value>
        </property>
        <!-- sshfence requires passwordless SSH login -->
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/home/hadoop/.ssh/id_rsa</value>
        </property>
        <!-- Local storage directory of the JournalNode service -->
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/home/hadoop/opt/hadoop-2.10.1/data/jn</value>
        </property>
        <!-- Disable permission checking -->
        <property>
                <name>dfs.permissions.enabled</name>
                <value>false</value>
        </property>
        <!-- Proxy provider the client uses to find the active NameNode (failover implementation) -->
        <property>
                <name>dfs.client.failover.proxy.provider.ostream-hdfs</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <!-- automatic failover-->
        <property>
                 <name>dfs.ha.automatic-failover.enabled</name>
                 <value>true</value>
         </property>
</configuration>
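
With core-site.xml and hdfs-site.xml in place on all nodes, the usual first-time startup order for HDFS HA looks roughly like the following. This is a sketch only, assuming ZooKeeper is already running on all three nodes and $HADOOP_HOME/bin and $HADOOP_HOME/sbin are on the PATH; adjust hostnames to your environment.

# 1. Start a JournalNode on each of the three nodes
hadoop-daemon.sh start journalnode          # run on hadoop-zxkf-1, -2 and -3

# 2. Format and start the first NameNode (nn1)
hdfs namenode -format                       # on hadoop-zxkf-1, first time only
hadoop-daemon.sh start namenode             # on hadoop-zxkf-1

# 3. Copy the metadata to the second NameNode (nn2)
hdfs namenode -bootstrapStandby             # on hadoop-zxkf-3

# 4. Initialize the HA state in ZooKeeper (creates the znode used by the ZKFCs)
hdfs zkfc -formatZK                         # on hadoop-zxkf-1, first time only

# 5. Start the whole HDFS cluster (NameNodes, DataNodes, ZKFCs)
start-dfs.sh                                # on hadoop-zxkf-1

# 6. Verify which NameNode is active
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2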

yarn-site.xml

<?xml version="1.0"?>
<configuration>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <!-- Enable log aggregation -->
        <property>
                <name>yarn.log-aggregation-enable</name>
                <value>true</value>
        </property>
        <!-- Job history server URL for aggregated logs -->
        <property>
                <name>yarn.log.server.url</name>
                <value>http://hadoop-zxkf-1:19888/jobhistory/logs/</value>
        </property>

        <property>
                <name>yarn.log-aggregation.retain-seconds</name>
                <value>86400</value>
        </property>

        <!-- Enable ResourceManager HA -->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>

        <!-- Declare the cluster ID and the two ResourceManagers -->
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>cluster-yarn1</value>
        </property>

        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>

        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>hadoop-zxkf-2</value>
        </property>

        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>hadoop-zxkf-3</value>
        </property>

        <!-- ZooKeeper quorum address -->
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>hadoop-zxkf-1:2181,hadoop-zxkf-2:2181,hadoop-zxkf-3:2181</value>
        </property>

        <!-- Enable automatic recovery of ResourceManager state -->
        <property>
                <name>yarn.resourcemanager.recovery.enabled</name>
                <value>true</value>
        </property>

        <!-- Store ResourceManager state in the ZooKeeper cluster -->
        <property>
                <name>yarn.resourcemanager.store.class</name>     
                <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
        </property>
</configuration>
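
Once HDFS is up, the ResourceManagers can be started. In Hadoop 2.x, start-yarn.sh normally only starts the ResourceManager on the node where it is run (plus the NodeManagers listed in the slaves file), so the standby RM has to be started by hand. A sketch, assuming the same PATH setup as above and that the JobHistory server addresses are configured in mapred-site.xml (not shown here):

# Start rm1 and the NodeManagers
start-yarn.sh                               # on hadoop-zxkf-2 (rm1)

# Start the standby ResourceManager rm2 manually
yarn-daemon.sh start resourcemanager        # on hadoop-zxkf-3 (rm2)

# Optional: the history server referenced by yarn.log.server.url
mr-jobhistory-daemon.sh start historyserver # on hadoop-zxkf-1

# Verify ResourceManager HA state
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2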