
Installing CentOS 7 in Docker and configuring a Hadoop cluster

		## Pull the CentOS image
		$ docker pull centos
		
		## List the downloaded images
		$ docker image ls -a
		
		## Start a container from the image and enter it
		$ docker run -it --name centos-1 centos /bin/bash
		
		## Set the time zone
		$ ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
		## Install net-tools so ifconfig is available
		$ yum install -y net-tools.x86_64
		
		## Check the network interface details
		$ ifconfig
		
		## Change the root password
		$ passwd

		## Install the OpenSSH server
		$ yum install openssh-server -y
		
		## Generate the SSH host keys (one per key type)
		$ ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key
		$ ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key
		$ ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key
		
		## Write the startup script
		$ vi run.sh
		Contents:
			#!/bin/bash
			/usr/sbin/sshd -D
			
		## Make the script executable
		$ chmod +x ./run.sh
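		## Optional sanity check before committing: run the script in the background and confirm
		## sshd is listening on port 22 (netstat comes from the net-tools package installed above)
		$ ./run.sh &
		$ netstat -tlnp | grep :22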
			
		## Exit, stop the container, and commit it as a new image
		$ exit
		$ docker container stop centos-1
		$ docker commit centos-1 centos_me:v1.0
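		## The new image should now show up:
		$ docker image ls centos_me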
		
		## Start a new container (the next command is the better option)
		$ docker run --name hadoop0 -d -p 5001:22 -p 50070:50070 -p 9000:9000 centos_me:v1.0 /usr/sbin/sshd -D
		## Using the following command instead avoids having to configure hosts later:
		$ docker run --name hadoop2 -d -p 5001:22 -p 50070:50070 -p 9000:9000 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 centos_me:v1.0 /usr/sbin/sshd -D
		$ docker container ls
		$ docker exec -it hadoop0 bash
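		## If the --add-host variant was used, the name mappings should already be in place (the
		## 172.17.0.x addresses assume Docker's default bridge assigns them in container start order):
		$ grep hadoop /etc/hosts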

		## Install and configure Java and Hadoop
		## (this assumes the JDK and Hadoop tarballs have already been unpacked to /usr/local/jdk and /usr/local/hadoop)
		$ vi /etc/profile
			
		export JAVA_HOME=/usr/local/jdk/jdk1.8.0_181
		export HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.1
		export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib/native:$HADOOP_HOME/bin:$PATH
		export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
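		## Reload the profile and check that both tools resolve:
		$ source /etc/profile
		$ java -version
		$ hadoop version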
		
		## Set up passwordless SSH login
		$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
		$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
		$ chmod 0600 ~/.ssh/authorized_keys
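		## Quick test: the connection should not prompt for a password (this assumes the
		## openssh-clients package, installed in the final step below, is already present)
		$ ssh localhost hostname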
		
		## In the Hadoop config directory /usr/local/hadoop/hadoop-2.9.1/etc/hadoop, edit the slaves file and write (or use the one-liner below):
			hadoop3
			hadoop4
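		## Equivalently, written non-interactively:
		$ printf 'hadoop3\nhadoop4\n' > /usr/local/hadoop/hadoop-2.9.1/etc/hadoop/slaves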
		
		## In etc/hadoop/hadoop-env.sh, insert a line before the existing "export JAVA_HOME":
			JAVA_HOME=/usr/local/jdk/jdk1.8.0_181/
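		## The same effect, done with sed by rewriting the stock export line directly:
		$ sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk/jdk1.8.0_181|' /usr/local/hadoop/hadoop-2.9.1/etc/hadoop/hadoop-env.sh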
	## Edit the configuration files
		1. core-site.xml
		<configuration>
			<property>
				<!-- fs.default.name is a deprecated alias of fs.defaultFS; it still works in Hadoop 2.x -->
				<name>fs.default.name</name>
				<value>hdfs://hadoop2:9000</value>
			</property>
			<property>
				<name>io.file.buffer.size</name>
				<value>131072</value>
			</property>
			<property>
				<name>hadoop.tmp.dir</name>
				<value>/home/hadoop/tmp</value>
				<description>A base for other temporary directories.</description>
			</property>
		</configuration>
		
		2. hdfs-site.xml
		<configuration>
			<property>
				<name>dfs.namenode.secondary.http-address</name>
				<value>hadoop2:9001</value>
				<description>Address for viewing HDFS status in the web UI</description>
			</property>
			<property>
				<name>dfs.namenode.name.dir</name>
				<value>/home/hadoop/dfs/name</value>
			</property>
			<property>
				<name>dfs.datanode.data.dir</name>
				<value>/home/hadoop/dfs/data</value>
			</property>
			<property>
				<name>dfs.replication</name>
				<value>2</value>
				<description>Each block is stored as 2 replicas</description>
			</property>
			<property>
				<name>dfs.webhdfs.enabled</name>
				<value>true</value>
			</property>
		</configuration>
		
		3. yarn-site.xml
		<configuration>
			<!-- Site specific YARN configuration properties -->
			<property>
				<name>yarn.nodemanager.aux-services</name>
				<value>mapreduce_shuffle</value>
			</property>
			<property>
				<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
				<value>org.apache.hadoop.mapred.ShuffleHandler</value>
			</property>
			<property>
				<name>yarn.resourcemanager.address</name>
				<value>hadoop2:8032</value>
			</property>
			<property>
				<name>yarn.resourcemanager.scheduler.address</name>
				<value>hadoop2:8030</value>
			</property>
			<property>
				<name>yarn.resourcemanager.resource-tracker.address</name>
				<value>hadoop2:8031</value>
			</property>
			<property>
				<name>yarn.resourcemanager.admin.address</name>
				<value>hadoop2:8033</value>
			</property>
			<property>
				<name>yarn.resourcemanager.webapp.address</name>
				<value>hadoop2:8088</value>
			</property>
			<property>
				<name>yarn.nodemanager.resource.memory-mb</name>
				<value>1024</value>
			</property>
			<property>
				<name>yarn.nodemanager.resource.cpu-vcores</name>
				<value>1</value>
			</property>
		</configuration>
		
		4. mapred-site.xml (in Hadoop 2.x, create it from mapred-site.xml.template if it does not exist)
		<configuration>
			<property>
				<name>mapreduce.framework.name</name>
				<value>yarn</value>
			</property>
			<property>
				<name>mapreduce.jobhistory.address</name>
				<value>hadoop2:10020</value>
			</property>
			<property>
				<name>mapreduce.jobhistory.webapp.address</name>
				<value>hadoop2:19888</value>
			</property>
		</configuration>
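		## A quick way to confirm the XML parses and the NameNode address is picked up
		## (hdfs getconf is part of the stock Hadoop CLI; this should print hdfs://hadoop2:9000):
		$ hdfs getconf -confKey fs.default.name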

## Configure the ssh client (these options stop host-key prompts from blocking Hadoop's non-interactive ssh)
vi /etc/ssh/ssh_config  and add:

		StrictHostKeyChecking no
		UserKnownHostsFile /dev/null

##Stop the container (hadoop2 here; use hadoop0 if that was the name chosen above)
docker stop hadoop2
##Commit the image again
docker commit hadoop2 centos_me:v1.0


###Start the cluster
Start three containers from the new image. Note that the hadoop2 name is being reused, so remove the stopped container first (docker rm hadoop2):
docker run --name hadoop2 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5002:22 -p 9870:9870 -p 8088:8088 -p 19888:19888 -p 50070:50070 -p 9000:9000 centos_me:v1.0 /usr/sbin/sshd -D

docker run --name hadoop3 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5003:22 centos_me:v1.0 /usr/sbin/sshd -D

docker run --name hadoop4 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5004:22 centos_me:v1.0 /usr/sbin/sshd -D
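## Optional check once all three are up: the containers should be running with their port mappings,
## and ssh between nodes should work without a password prompt (this relies on the openssh-clients
## package installed in the next step):
docker ps --format '{{.Names}}\t{{.Ports}}'
docker exec -it hadoop2 ssh hadoop3 hostname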


###Install on every node:
yum -y install openssh-clients
yum -y install which
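###Finish up (a sketch of the standard Hadoop 2.x procedure; PATH comes from the /etc/profile set up earlier, so source it in the exec shell): on hadoop2, format the NameNode once, start HDFS and YARN, and verify the daemons with jps. The NameNode web UI is then reachable through the mapped port 50070.

docker exec -it hadoop2 bash
source /etc/profile
hdfs namenode -format
start-dfs.sh
start-yarn.sh
jps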