1. 程式人生 > 其它 >【大資料許可權分配】一、kerberos

【大資料許可權分配】一、kerberos

一、kerberos概念

金鑰分發中心,管理使用者身份資訊,進行身份認證。

二、安裝

選擇叢集中的一臺主機(hadoop102)作為Kerberos服務端,安裝KDC,所有主機都需要部署Kerberos客戶端。

選擇hadoop102 安裝伺服器

yum install -y krb5-server

所有機器安裝 客戶端

yum install -y krb5-workstation krb5-libs

修改服務端配置檔案

vim /var/kerberos/krb5kdc/kdc.conf

[kdcdefaults]
 kdc_ports = 88
 kdc_tcp_ports = 88

[realms]
 EXAMPLE.COM = {
  #master_key_type = aes256-cts
  acl_file = /var/kerberos/krb5kdc/kadm5.acl
  dict_file = /usr/share/dict/words
  admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
  supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
 }

  

修改所有客戶端主機

vim /etc/krb5.conf

# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/

[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 dns_lookup_realm = false
 dns_lookup_kdc = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 rdns = false
 pkinit_anchors = FILE:/etc/pki/tls/certs/ca-bundle.crt
 default_realm = EXAMPLE.COM
 #default_ccache_name = KEYRING:persistent:%{uid}

[realms]
 EXAMPLE.COM = {
  kdc = hadoop102
  admin_server = hadoop102
 }
[domain_realm]
# .example.com = EXAMPLE.COM
# example.com = EXAMPLE.COM

  

初始化kdc資料庫

kdb5_util create -s

  

修改服務端配置檔案

vim /var/kerberos/krb5kdc/kadm5.acl

*/admin@EXAMPLE.COM     *

  

啟動kdc服務

systemctl start krb5kdc
systemctl enable krb5kdc

啟動kadmin服務(Kerberos的管理服務,是KDC資料庫的訪問入口)

systemctl start kadmin
systemctl enable kadmin

  

在服務端執行以下命令,並輸入密碼

kadmin.local -q "addprinc admin/admin"

  

三、操作kerberos

1.本地登入無需認證

kadmin.local

遠端登入需要主體認證

kadmin

報了一個錯誤

Authenticating as principal root/admin@EXAMPLE.COM with password.
kadmin: Client 'root/admin@EXAMPLE.COM' not found in Kerberos database while initializing kadmin interface

資料庫中沒有root/admin這個主體,只有admin/admin

需要建立一個

kadmin.local -q "addprinc root/admin"

再遠端登入ok

2.建立kerberos主體

登入資料庫後執行

kadmin.local: addprinc test

 建立了test這個主體

檢視所有主體

kadmin.local: list_principals

  

3.認證操作

1.輸入下面指令

kinit test

  按提示輸入密碼

檢視憑證

klist 

2.金鑰檔案認證

生成主體test的keytab檔案到指定目錄/root/test.keytab

kadmin.local -q "xst -norandkey -k  /root/test.keytab test@EXAMPLE.COM"

使用keytab進行認證

kinit -kt /root/test.keytab test

檢視憑證

klist 

二、建立hadoop系統使用者

Hadoop開啟Kerberos,需為不同服務準備不同的使用者,啟動服務時需要使用相應的使用者。須在所有節點建立以下使用者和使用者組。

為所有節點新增組

groupadd hadoop

各個節點新增三個使用者,並新增到hadoop組下面

useradd hdfs -g hadoop

echo hdfs| passwd --stdin hdfs

useradd yarn -g hadoop

echo yarn| passwd --stdin yarn

useradd mapred -g hadoop

echo mapred| passwd --stdin mapred

四、為hadoop各個元件新增kerberos主體

服務

所在主機

主體(Principal)

NameNode

hadoop102

nn/hadoop102

DataNode

hadoop102

dn/hadoop102

DataNode

hadoop103

dn/hadoop103

DataNode

hadoop104

dn/hadoop104

Secondary NameNode

hadoop104

sn/hadoop104

ResourceManager

hadoop103

rm/hadoop103

NodeManager

hadoop102

nm/hadoop102

NodeManager

hadoop103

nm/hadoop103

NodeManager

hadoop104

nm/hadoop104

JobHistory Server

hadoop102

jhs/hadoop102

Web UI

hadoop102

HTTP/hadoop102

Web UI

hadoop103

HTTP/hadoop103

Web UI

hadoop104

HTTP/hadoop104

1.伺服器端建立主體路徑準備

mkdir /etc/security/keytab/

chown -R root:hadoop /etc/security/keytab/

chmod 770 /etc/security/keytab/

管理員主體認證

kinit admin/admin

登入資料庫客戶端

kadmin

執行主體語句

kadmin:  addprinc -randkey test/test
kadmin:  xst -k /etc/security/keytab/test.keytab test/test

解釋語句含義

xst -k /etc/security/keytab/test.keytab test/test:作用是將主體的金鑰寫入keytab檔案
xst:將主體的金鑰寫入keytab檔案
-k /etc/security/keytab/test.keytab:指明keytab檔案路徑和檔名
test/test:主體

上述所有可以簡化為

[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey test/test"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/test.keytab test/test"

  

2.所有節點建立keytab檔案目錄

[root@hadoop102 ~]# mkdir /etc/security/keytab/
[root@hadoop102 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop102 ~]# chmod 770 /etc/security/keytab/

[root@hadoop103 ~]# mkdir /etc/security/keytab/
[root@hadoop103 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop103 ~]# chmod 770 /etc/security/keytab/

[root@hadoop104 ~]# mkdir /etc/security/keytab/
[root@hadoop104 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop104 ~]# chmod 770 /etc/security/keytab/ 

3.根據上述表格中節點與元件的對應關係,建立主體並生成keytab檔案

以下在hadoop102執行

[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey nn/hadoop102"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/nn.service.keytab nn/hadoop102"

  

kadmin -padmin/admin -wadmin -q"addprinc -randkey dn/hadoop102"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/dn.service.keytab dn/hadoop102"

  

[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey nm/hadoop102"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/nm.service.keytab nm/hadoop102"

  

[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey jhs/hadoop102"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/jhs.service.keytab jhs/hadoop102"

  

[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey HTTP/hadoop102"
[root@hadoop102 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/spnego.service.keytab HTTP/hadoop102"

  

以下在hadoop103執行

[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey rm/hadoop103"
[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/rm.service.keytab rm/hadoop103"

  

[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey dn/hadoop103"
[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/dn.service.keytab dn/hadoop103"

  

[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey nm/hadoop103"
[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/nm.service.keytab nm/hadoop103"

  

[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey HTTP/hadoop103"
[root@hadoop103 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/spnego.service.keytab HTTP/hadoop103"

  

hadoop104

[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey dn/hadoop104"
[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/dn.service.keytab dn/hadoop104"

  

[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey sn/hadoop104"
[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/sn.service.keytab sn/hadoop104"

  

[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey nm/hadoop104"
[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/nm.service.keytab nm/hadoop104"

  

[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"addprinc -randkey HTTP/hadoop104"
[root@hadoop104 ~]# kadmin -padmin/admin -wadmin -q"xst -k /etc/security/keytab/spnego.service.keytab HTTP/hadoop104"

  

修改所有節點的訪問許可權

[root@hadoop102 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop102 ~]# chmod 660 /etc/security/keytab/*

[root@hadoop103 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop103 ~]# chmod 660 /etc/security/keytab/*

[root@hadoop104 ~]# chown -R root:hadoop /etc/security/keytab/
[root@hadoop104 ~]# chmod 660 /etc/security/keytab/*

  

4、修改hadoop配置檔案進行分發

core-site.xml

<!-- Kerberos主體到系統使用者的對映機制 -->
<property>
  <name>hadoop.security.auth_to_local.mechanism</name>
  <value>MIT</value>
</property>

<!-- Kerberos主體到系統使用者的具體對映規則 -->
<property>
  <name>hadoop.security.auth_to_local</name>
  <value>
    RULE:[2:$1/$2@$0]([ndjs]n\/.*@EXAMPLE\.COM)s/.*/hdfs/
    RULE:[2:$1/$2@$0]([rn]m\/.*@EXAMPLE\.COM)s/.*/yarn/
    RULE:[2:$1/$2@$0](jhs\/.*@EXAMPLE\.COM)s/.*/mapred/
    DEFAULT
  </value>
</property>

<!-- 啟用Hadoop叢集Kerberos安全認證 -->
<property>
  <name>hadoop.security.authentication</name>
  <value>kerberos</value>
</property>

<!-- 啟用Hadoop叢集授權管理 -->
<property>
  <name>hadoop.security.authorization</name>
  <value>true</value>
</property>

<!-- Hadoop叢集間RPC通訊設為僅認證模式 -->
<property>
  <name>hadoop.rpc.protection</name>
  <value>authentication</value>
</property>

  

hdfs-site.xml

<!-- 訪問DataNode資料塊時需通過Kerberos認證 -->
<property>
  <name>dfs.block.access.token.enable</name>
  <value>true</value>
</property>

<!-- NameNode服務的Kerberos主體,_HOST會自動解析為服務所在的主機名 -->
<property>
  <name>dfs.namenode.kerberos.principal</name>
  <value>nn/_HOST@EXAMPLE.COM</value>
</property>

<!-- NameNode服務的Kerberos金鑰檔案路徑 -->
<property>
  <name>dfs.namenode.keytab.file</name>
  <value>/etc/security/keytab/nn.service.keytab</value>
</property>

<!-- Secondary NameNode服務的Kerberos金鑰檔案路徑 -->
<property>
  <name>dfs.secondary.namenode.keytab.file</name>
  <value>/etc/security/keytab/sn.service.keytab</value>
</property>

<!-- Secondary NameNode服務的Kerberos主體 -->
<property>
  <name>dfs.secondary.namenode.kerberos.principal</name>
  <value>sn/_HOST@EXAMPLE.COM</value>
</property>

<!-- NameNode Web服務的Kerberos主體 -->
<property>
  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
  <value>HTTP/_HOST@EXAMPLE.COM</value>
</property>

<!-- WebHDFS REST服務的Kerberos主體 -->
<property>
  <name>dfs.web.authentication.kerberos.principal</name>
  <value>HTTP/_HOST@EXAMPLE.COM</value>
</property>

<!-- Secondary NameNode Web UI服務的Kerberos主體 -->
<property>
  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
  <value>HTTP/_HOST@EXAMPLE.COM</value>
</property>

<!-- Hadoop Web UI的Kerberos金鑰檔案路徑 -->
<property>
  <name>dfs.web.authentication.kerberos.keytab</name>
  <value>/etc/security/keytab/spnego.service.keytab</value>
</property>

<!-- DataNode服務的Kerberos主體 -->
<property>
  <name>dfs.datanode.kerberos.principal</name>
  <value>dn/_HOST@EXAMPLE.COM</value>
</property>

<!-- DataNode服務的Kerberos金鑰檔案路徑 -->
<property>
  <name>dfs.datanode.keytab.file</name>
  <value>/etc/security/keytab/dn.service.keytab</value>
</property>

<!-- 配置NameNode Web UI 使用HTTPS協議 -->
<property>
  <name>dfs.http.policy</name>
  <value>HTTPS_ONLY</value>
</property>

<!-- 配置DataNode資料傳輸保護策略為僅認證模式 -->
<property>
  <name>dfs.data.transfer.protection</name>
  <value>authentication</value>
</property>

  

yarn-site.xml

<!-- Resource Manager 服務的Kerberos主體 -->
<property>
  <name>yarn.resourcemanager.principal</name>
  <value>rm/_HOST@EXAMPLE.COM</value>
</property>

<!-- Resource Manager 服務的Kerberos金鑰檔案 -->
<property>
  <name>yarn.resourcemanager.keytab</name>
  <value>/etc/security/keytab/rm.service.keytab</value>
</property>

<!-- Node Manager 服務的Kerberos主體 -->
<property>
  <name>yarn.nodemanager.principal</name>
  <value>nm/_HOST@EXAMPLE.COM</value>
</property>

<!-- Node Manager 服務的Kerberos金鑰檔案 -->
<property>
  <name>yarn.nodemanager.keytab</name>
  <value>/etc/security/keytab/nm.service.keytab</value>
</property>

  

mapred-site.xml

<!-- 歷史伺服器的Kerberos金鑰檔案路徑 -->
<property>
  <name>mapreduce.jobhistory.keytab</name>
  <value>/etc/security/keytab/jhs.service.keytab</value>
</property>

<!-- 歷史伺服器的Kerberos主體 -->
<property>
  <name>mapreduce.jobhistory.principal</name>
  <value>jhs/_HOST@EXAMPLE.COM</value>
</property>

  分發配置檔案到叢集各個節點

5.配置hdfs使用https安全協議

生成金鑰對

keytool -keystore /etc/security/keytab/keystore -alias jetty -genkey -keyalg RSA 

解釋

Keytool是java資料證書的管理工具,使使用者能夠管理自己的公/私鑰對及相關證書。
-keystore 指定金鑰庫的名稱及位置(產生的各類資訊將存在.keystore檔案中)
-genkey(或者-genkeypair) 生成金鑰對
-alias  為生成的金鑰對指定別名,如果沒有預設是mykey
-keyalg 指定金鑰的演算法 RSA/DSA 預設是DSA

  

修改keystore訪問許可權

chown -R root:hadoop /etc/security/keytab/keystore

chmod 660 /etc/security/keytab/keystore

將證書分發到各個節點

xsync /etc/security/keytab/keystore

修改hadoop的http配置檔案

mv $HADOOP_HOME/etc/hadoop/ssl-server.xml.example $HADOOP_HOME/etc/hadoop/ssl-server.xml

vim $HADOOP_HOME/etc/hadoop/ssl-server.xml

<!-- SSL金鑰庫路徑 -->
<property>
  <name>ssl.server.keystore.location</name>
  <value>/etc/security/keytab/keystore</value>
</property>

<!-- SSL金鑰庫密碼 -->
<property>
  <name>ssl.server.keystore.password</name>
  <value>123456</value>
</property>

<!-- SSL可信任金鑰庫路徑 -->
<property>
  <name>ssl.server.truststore.location</name>
  <value>/etc/security/keytab/keystore</value>
</property>

<!-- SSL金鑰庫中金鑰的密碼 -->
<property>
  <name>ssl.server.keystore.keypassword</name>
  <value>123456</value>
</property>

<!-- SSL可信任金鑰庫密碼 -->
<property>
  <name>ssl.server.truststore.password</name>
  <value>123456</value>
</property>

  分發到各個節點

6.配置yarn使用LinuxContainerExecutor

修改所有節點的container-executor所有者和許可權,要求其所有者為root,所有組為hadoop(啟動NodeManager的yarn使用者的所屬組),許可權為6050。其預設路徑為$HADOOP_HOME/bin

[root@hadoop102 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/bin/container-executor
[root@hadoop102 ~]# chmod 6050 /opt/module/hadoop-3.1.3/bin/container-executor

[root@hadoop103 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/bin/container-executor
[root@hadoop103 ~]# chmod 6050 /opt/module/hadoop-3.1.3/bin/container-executor

[root@hadoop104 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/bin/container-executor
[root@hadoop104 ~]# chmod 6050 /opt/module/hadoop-3.1.3/bin/container-executor

修改所有節點的container-executor.cfg檔案的所有者和許可權,要求該檔案及其所有的上級目錄的所有者均為root,所有組為hadoop(啟動NodeManager的yarn使用者的所屬組),許可權為400。其預設路徑為$HADOOP_HOME/etc/hadoop

[root@hadoop102 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg
[root@hadoop102 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop
[root@hadoop102 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc
[root@hadoop102 ~]# chown root:hadoop /opt/module/hadoop-3.1.3
[root@hadoop102 ~]# chown root:hadoop /opt/module
[root@hadoop102 ~]# chmod 400 /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg

[root@hadoop103 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg
[root@hadoop103 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop
[root@hadoop103 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc
[root@hadoop103 ~]# chown root:hadoop /opt/module/hadoop-3.1.3
[root@hadoop103 ~]# chown root:hadoop /opt/module
[root@hadoop103 ~]# chmod 400 /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg

[root@hadoop104 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg
[root@hadoop104 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc/hadoop
[root@hadoop104 ~]# chown root:hadoop /opt/module/hadoop-3.1.3/etc
[root@hadoop104 ~]# chown root:hadoop /opt/module/hadoop-3.1.3
[root@hadoop104 ~]# chown root:hadoop /opt/module
[root@hadoop104 ~]# chmod 400 /opt/module/hadoop-3.1.3/etc/hadoop/container-executor.cfg

  

vim $HADOOP_HOME/etc/hadoop/container-executor.cfg

yarn.nodemanager.linux-container-executor.group=hadoop
banned.users=hdfs,yarn,mapred
min.user.id=1000
allowed.system.users=
feature.tc.enabled=false

  

vim $HADOOP_HOME/etc/hadoop/yarn-site.xml

<!-- 配置Node Manager使用LinuxContainerExecutor管理Container -->
<property>
  <name>yarn.nodemanager.container-executor.class</name>
  <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
</property>

<!-- 配置Node Manager的啟動使用者的所屬組 -->
<property>
  <name>yarn.nodemanager.linux-container-executor.group</name>
  <value>hadoop</value>
</property>

<!-- LinuxContainerExecutor指令碼路徑 -->
<property>
  <name>yarn.nodemanager.linux-container-executor.path</name>
  <value>/opt/module/hadoop-3.1.3/bin/container-executor</value>
</property>

分發container-executor.cfg和yarn-site.xml檔案

五、安全模式下啟動叢集

local

$HADOOP_LOG_DIR

hdfs:hadoop

drwxrwxr-x

local

dfs.namenode.name.dir

hdfs:hadoop

drwx------

local

dfs.datanode.data.dir

hdfs:hadoop

drwx------

local

dfs.namenode.checkpoint.dir

hdfs:hadoop

drwx------

local

yarn.nodemanager.local-dirs

yarn:hadoop

drwxrwxr-x

local

yarn.nodemanager.log-dirs

yarn:hadoop

drwxrwxr-x

 

$HADOOP_LOG_DIR(所有節點)該變數位於hadoop-env.sh檔案,預設值為 ${HADOOP_HOME}/logs

[root@hadoop102 ~]# chown hdfs:hadoop /opt/module/hadoop-3.1.3/logs/
[root@hadoop102 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/

[root@hadoop103 ~]# chown hdfs:hadoop /opt/module/hadoop-3.1.3/logs/
[root@hadoop103 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/

[root@hadoop104 ~]# chown hdfs:hadoop /opt/module/hadoop-3.1.3/logs/
[root@hadoop104 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/

  

dfs.namenode.name.dir(NameNode節點)該引數位於hdfs-site.xml檔案,預設值為file://${hadoop.tmp.dir}/dfs/name

[root@hadoop102 ~]# chown -R hdfs:hadoop /opt/module/hadoop-3.1.3/data/dfs/name/
[root@hadoop102 ~]# chmod 700 /opt/module/hadoop-3.1.3/data/dfs/name/

  

dfs.datanode.data.dir(DataNode節點)該引數位於hdfs-site.xml檔案,預設值為file://${hadoop.tmp.dir}/dfs/data

[root@hadoop102 ~]# chown -R hdfs:hadoop /opt/module/hadoop-3.1.3/data/dfs/data/
[root@hadoop102 ~]# chmod 700 /opt/module/hadoop-3.1.3/data/dfs/data/

[root@hadoop103 ~]# chown -R hdfs:hadoop /opt/module/hadoop-3.1.3/data/dfs/data/
[root@hadoop103 ~]# chmod 700 /opt/module/hadoop-3.1.3/data/dfs/data/

[root@hadoop104 ~]# chown -R hdfs:hadoop /opt/module/hadoop-3.1.3/data/dfs/data/
[root@hadoop104 ~]# chmod 700 /opt/module/hadoop-3.1.3/data/dfs/data/

  

dfs.namenode.checkpoint.dir(SecondaryNameNode節點)該引數位於hdfs-site.xml檔案,預設值為file://${hadoop.tmp.dir}/dfs/namesecondary

[root@hadoop104 ~]# chown -R hdfs:hadoop /opt/module/hadoop-3.1.3/data/dfs/namesecondary/
[root@hadoop104 ~]# chmod 700 /opt/module/hadoop-3.1.3/data/dfs/namesecondary/

 

yarn.nodemanager.local-dirs(NodeManager節點)該引數位於yarn-site.xml檔案,預設值為file://${hadoop.tmp.dir}/nm-local-dir

[root@hadoop102 ~]# chown -R yarn:hadoop /opt/module/hadoop-3.1.3/data/nm-local-dir/
[root@hadoop102 ~]# chmod -R 775 /opt/module/hadoop-3.1.3/data/nm-local-dir/

[root@hadoop103 ~]# chown -R yarn:hadoop /opt/module/hadoop-3.1.3/data/nm-local-dir/
[root@hadoop103 ~]# chmod -R 775 /opt/module/hadoop-3.1.3/data/nm-local-dir/

[root@hadoop104 ~]# chown -R yarn:hadoop /opt/module/hadoop-3.1.3/data/nm-local-dir/
[root@hadoop104 ~]# chmod -R 775 /opt/module/hadoop-3.1.3/data/nm-local-dir/

yarn.nodemanager.log-dirs(NodeManager節點)該引數位於yarn-site.xml檔案,預設值為$HADOOP_LOG_DIR/userlogs

[root@hadoop102 ~]# chown yarn:hadoop /opt/module/hadoop-3.1.3/logs/userlogs/
[root@hadoop102 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/userlogs/

[root@hadoop103 ~]# chown yarn:hadoop /opt/module/hadoop-3.1.3/logs/userlogs/
[root@hadoop103 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/userlogs/

[root@hadoop104 ~]# chown yarn:hadoop /opt/module/hadoop-3.1.3/logs/userlogs/
[root@hadoop104 ~]# chmod 775 /opt/module/hadoop-3.1.3/logs/userlogs/

  

啟動hdfs

1.單點

sudo -i -u hdfs hdfs --daemon start namenode
[root@hadoop102 ~]# sudo -i -u hdfs hdfs --daemon start datanode
[root@hadoop103 ~]# sudo -i -u hdfs hdfs --daemon start datanode
[root@hadoop104 ~]# sudo -i -u hdfs hdfs --daemon start datanode
[root@hadoop104 ~]# sudo -i -u hdfs hdfs --daemon start secondarynamenode

2.群起

先有ssh

再者修改配置

vim $HADOOP_HOME/sbin/start-dfs.sh 在頂部新增

HDFS_DATANODE_USER=hdfs
HDFS_NAMENODE_USER=hdfs
HDFS_SECONDARYNAMENODE_USER=hdfs

[root@hadoop102 ~]# start-dfs.sh

檢視頁面

6.修改hdfs特定路徑訪問許可權

hdfs

/

hdfs:hadoop

drwxr-xr-x

hdfs

/tmp

hdfs:hadoop

drwxrwxrwxt

hdfs

/user

hdfs:hadoop

drwxrwxr-x

hdfs

yarn.nodemanager.remote-app-log-dir

yarn:hadoop

drwxrwxrwxt

hdfs

mapreduce.jobhistory.intermediate-done-dir

mapred:hadoop

drwxrwxrwxt

hdfs

mapreduce.jobhistory.done-dir

mapred:hadoop

drwxrwx---

  

建立hdfs/hadoop主體,執行以下命令並按照提示輸入密碼

[root@hadoop102 ~]# kadmin.local -q "addprinc hdfs/hadoop"

認證hdfs/hadoop主體,執行以下命令並按照提示輸入密碼

[root@hadoop102 ~]# kinit hdfs/hadoop 

修改 /、/tmp、/user 路徑的所有者和許可權

[root@hadoop102 ~]# hadoop fs -chown hdfs:hadoop / /tmp /user
[root@hadoop102 ~]# hadoop fs -chmod 755 /
[root@hadoop102 ~]# hadoop fs -chmod 1777 /tmp
[root@hadoop102 ~]# hadoop fs -chmod 775 /user

引數yarn.nodemanager.remote-app-log-dir位於yarn-site.xml檔案,預設值/tmp/logs

[root@hadoop102 ~]# hadoop fs -chown yarn:hadoop /tmp/logs
[root@hadoop102 ~]# hadoop fs -chmod 1777 /tmp/logs

  

引數mapreduce.jobhistory.intermediate-done-dir位於mapred-site.xml檔案,預設值為/tmp/hadoop-yarn/staging/history/done_intermediate,需保證該路徑的所有上級目錄(除/tmp)的所有者均為mapred,所屬組為hadoop,許可權為770

[root@hadoop102 ~]# hadoop fs -chown -R mapred:hadoop /tmp/hadoop-yarn/staging/history/done_intermediate
[root@hadoop102 ~]# hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging/history/done_intermediate

[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/
[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/
[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/

[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/history/
[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/
[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/

引數mapreduce.jobhistory.done-dir位於mapred-site.xml檔案,預設值為/tmp/hadoop-yarn/staging/history/done,需保證該路徑的所有上級目錄(除/tmp)的所有者均為mapred,所屬組為hadoop,許可權為770

[root@hadoop102 ~]# hadoop fs -chown -R mapred:hadoop /tmp/hadoop-yarn/staging/history/done
[root@hadoop102 ~]# hadoop fs -chmod -R 750 /tmp/hadoop-yarn/staging/history/done

[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/
[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/staging/
[root@hadoop102 ~]# hadoop fs -chown mapred:hadoop /tmp/hadoop-yarn/

[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/history/
[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/staging/
[root@hadoop102 ~]# hadoop fs -chmod 770 /tmp/hadoop-yarn/

啟動yarn

vim $HADOOP_HOME/sbin/start-yarn.sh

在頂部增加如下內容

YARN_RESOURCEMANAGER_USER=yarn
YARN_NODEMANAGER_USER=yarn
[root@hadoop103 ~]# start-yarn.sh

  

啟動歷史伺服器

[root@hadoop102 ~]# sudo -i -u mapred mapred --daemon start historyserver

  

6.安全叢集使用說明