How-to: enable HDFS HA on a new cluster
Configuration update:

hdfs-site.xml — define the nameservice, its two NameNodes (RPC and HTTP addresses), the shared JournalNode edits directory, the client failover proxy, and automatic failover:
<property>
  <name>dfs.nameservices</name>
  <value>dfscluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.dfscluster</name>
  <value>nn1,nn2</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.dfscluster.nn1</name>
  <value>master.chff.dc:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.dfscluster.nn2</name>
  <value>slave01.chff.dc:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.dfscluster.nn1</name>
  <value>master.chff.dc:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.dfscluster.nn2</name>
  <value>slave01.chff.dc:50070</value>
</property>
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://master.chff.dc:8485;slave01.chff.dc:8485;slave.chff.dc:8485/dfscluster</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/journalnode/hadoop/data/dfs/journalnode</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.dfscluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
  <name>dfs.ha.automatic-failover.enabled</name>
  <value>true</value>
</property>
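
With dfs.ha.automatic-failover.enabled set to true, HDFS also requires a fencing method to be configured, even with the quorum journal manager (the journal quorum already prevents a fenced NameNode from writing edits). A minimal sketch, assuming the QJM guarantee is sufficient for your cluster, is the no-op shell fence; swap in sshfence if you want the old active NameNode's process killed on failover:

<!-- hdfs-site.xml: fencing is mandatory when automatic failover is on.
     shell(/bin/true) is a common no-op choice with QJM, since the journal
     quorum already blocks a fenced NameNode from writing to the edit log. -->
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>shell(/bin/true)</value>
</property>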
core-site.xml — point the default filesystem at the logical nameservice URI; clients resolve the active NameNode through the failover proxy provider rather than a fixed host:port:

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://dfscluster</value>
  <description>Logical HA nameservice URI (not a host:port pair)</description>
</property>
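
Automatic failover is driven by the ZKFC daemons, which coordinate through ZooKeeper, so core-site.xml also needs the ensemble address. A sketch, assuming ZooKeeper runs on the same three hosts as the JournalNodes on its default port 2181 (adjust to your actual ensemble):

<property>
  <name>ha.zookeeper.quorum</name>
  <value>master.chff.dc:2181,slave01.chff.dc:2181,slave.chff.dc:2181</value>
</property>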
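
With the configuration in place, a new cluster still has to be formatted and started in the right order. A sketch of the usual first-start sequence for Hadoop 2.x (run as the HDFS user; hostnames per the config above):

# 1. Start a JournalNode on each of the three qjournal hosts.
hadoop-daemon.sh start journalnode

# 2. On nn1 (master.chff.dc): format the namespace and start the NameNode.
hdfs namenode -format
hadoop-daemon.sh start namenode

# 3. On nn2 (slave01.chff.dc): copy nn1's metadata, then start the standby.
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode

# 4. On either NameNode: initialize the HA state znode in ZooKeeper.
hdfs zkfc -formatZK

# 5. Start the remaining daemons (DataNodes and ZKFCs) across the cluster.
start-dfs.sh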