Hadoop Ecosystem Setup (3 Nodes) - 07. Hive Configuration
# http://archive.apache.org/dist/hive/hive-2.1.1/
# ==================================================================Install Hive
tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/local
mv /usr/local/apache-hive-2.1.1-bin /usr/local/hive-2.1.1
rm -r ~/apache-hive-2.1.1-bin.tar.gz
cp ~/mysql-connector-java-5.1.46.jar /usr/local/hive-2.1.1/lib/
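# Optional sanity check: confirm the extracted layout and that the MySQL connector jar landed in hive/lib
ls -d /usr/local/hive-2.1.1
ls /usr/local/hive-2.1.1/lib | grep mysql-connector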
# Configure environment variables
# ==================================================================node1 node2 node3
vi /etc/profile
# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# ==================================================================node1
# Apply the environment variables
source /etc/profile
# Verify the result
echo $HIVE_HOME

mkdir -p $HIVE_HOME/data/hive
mkdir -p $HIVE_HOME/data/hive/operation_logs
mkdir -p $HIVE_HOME/data/resources

# Configure hive
cp $HIVE_HOME/conf/hive-env.sh.template $HIVE_HOME/conf/hive-env.sh
cp $HIVE_HOME/conf/hive-default.xml.template $HIVE_HOME/conf/hive-site.xml
cp $HIVE_HOME/conf/hive-exec-log4j2.properties.template $HIVE_HOME/conf/hive-exec-log4j2.properties
cp $HIVE_HOME/conf/hive-log4j2.properties.template $HIVE_HOME/conf/hive-log4j2.properties
# Replace ${system:java.io.tmpdir}/${system:user.name} with the local path /usr/local/hive-2.1.1/data/hive
# Replace ${system:java.io.tmpdir}/${hive.session.id}_resources with the local path /usr/local/hive-2.1.1/data/resources
# Replace ${system:java.io.tmpdir}/${system:user.name}/operation_logs with the local path /usr/local/hive-2.1.1/data/hive/operation_logs
vi $HIVE_HOME/conf/hive-site.xml
# Press Esc, then type:
:%s#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#
:%s#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#
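# The same two substitutions can also be applied non-interactively with sed (a sketch equivalent to the vim commands above; the first substitution also rewrites the operation_logs path, since its prefix matches):
sed -i 's#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#g' $HIVE_HOME/conf/hive-site.xml
sed -i 's#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#g' $HIVE_HOME/conf/hive-site.xml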
# Press Esc, type / to search for hive.exec.scratchdir; once found, press Insert and edit the <value> entries as follows
<property>
    <name>hive.exec.scratchdir</name>
    <value>/hive/tmp</value>
</property>
<property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/hive/warehouse</value>
</property>
<!-- JDBC connection to the hive database in MySQL -->
<property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<!-- JDBC driver for MySQL -->
<property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
</property>
<!-- MySQL user name -->
<property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
</property>
<!-- MySQL user password -->
<property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>Hive-123</value>
</property>
<!-- HA for hiveserver2 -->
<property>
    <name>hive.server2.support.dynamic.service.discovery</name>
    <value>true</value>
</property>
<property>
    <name>hive.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Hive web UI, not configured for now -->
<property>
    <name>hive.hwi.war.file</name>
    <value>/usr/local/hive-2.1.1/lib/hive-hwi-2.1.1.jar</value>
</property>
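# Optional: verify the JDBC credentials configured above before initializing the schema (assumes the hive user with password Hive-123 was created during the earlier MySQL setup):
mysql -h node1 -u hive -pHive-123 -e "SELECT VERSION();"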
# Copy the required jars from hbase/lib to hive/lib
cp $HBASE_HOME/lib/hbase-client-1.2.4.jar $HBASE_HOME/lib/hbase-common-1.2.4.jar $HIVE_HOME/lib

# Sync the jline version between hive and hadoop
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib
# Check the versions
# cd $HADOOP_HOME/share/hadoop/yarn/lib
# find ./ -name "*jline*jar"
# Remove the old jline 0.9
# rm jline-0.9.94.jar

# Copy the JDK's tools.jar to hive/lib
cp $JAVA_HOME/lib/tools.jar $HIVE_HOME/lib

# rm -f $HIVE_HOME/lib/log4j-slf4j-impl-2.4.1.jar

vi $HIVE_HOME/conf/hive-env.sh
HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export HIVE_HOME=/usr/local/hive-2.1.1
export HIVE_CONF_DIR=/usr/local/hive-2.1.1/conf
export HIVE_AUX_JARS_PATH=/usr/local/hive-2.1.1/lib
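# Optional check: confirm the copied jars are now present in hive/lib
ls $HIVE_HOME/lib | egrep "hbase-client|hbase-common|tools|jline"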
# If hadoop.proxyuser.root.groups was not configured in Hadoop earlier, configure it now
# ==================================================================node1 node2 node3
# If permissions are missing, add the following to Hadoop's core-site.xml:
vi $HADOOP_HOME/etc/hadoop/core-site.xml
<property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
</property>
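# Optional: the configured values can be read back from core-site.xml with getconf
hdfs getconf -confKey hadoop.proxyuser.root.hosts
hdfs getconf -confKey hadoop.proxyuser.root.groups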
# ==================================================================node1
# Refresh the configuration as the superuser
yarn rmadmin -refreshSuperUserGroupsConfiguration
hdfs dfsadmin -refreshSuperUserGroupsConfiguration
# ==================================================================node1 node2
# If namenode HA is configured, run this against both the active and standby namenodes
hdfs dfsadmin -fs hdfs://appcluster -refreshSuperUserGroupsConfiguration
# ==================================================================node2 node3
# Apply the environment variables
source /etc/profile
# Verify the result
echo $HIVE_HOME
# ==================================================================node1
$HIVE_HOME/bin/schematool -initSchema -dbType mysql
scp -r $HIVE_HOME node2:/usr/local/
scp -r $HIVE_HOME node3:/usr/local/
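# Optional: confirm that schematool created the metastore tables in MySQL (uses the hive/Hive-123 account configured in hive-site.xml):
mysql -h node1 -u hive -pHive-123 -D hive -e "SHOW TABLES;"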
# Startup
# ==================================================================node1 node2 node3
# Start zookeeper
zkServer.sh start
zkServer.sh status

# ==================================================================node1
# Start all hadoop processes
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node1
# For HBase HA (High Availability)
$HBASE_HOME/bin/hbase-daemon.sh start master
# Start HBase (start-hbase.sh)
$HBASE_HOME/bin/start-hbase.sh

# ==================================================================node2
# Enable HBase HA
$HBASE_HOME/bin/hbase-daemon.sh start master

# ==================================================================node1
$HIVE_HOME/bin/hiveserver2

# ==================================================================node2
$HIVE_HOME/bin/hiveserver2

# ==================================================================node1
zkCli.sh
ls /hiveserver2
get /hiveserver2/serverUri=node1:10000;version=2.1.1;sequence=0000000000

$HIVE_HOME/bin/beeline -u "jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" root 123456
# $HIVE_HOME/bin/beeline
# > !connect jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 root "123456"

> create external table user_info(user_id int comment 'userID', user_name string comment 'userName') row format delimited fields terminated by '\t' lines terminated by '\n';
> show tables;

mkdir /root/hive
vi /root/hive/user_info.txt
# (columns separated by a tab)
1001	zhangsan
1002	lisi
1003	wangwu

> load data local inpath '/root/hive/user_info.txt' into table user_info;
> select * from user_info;
> quit;

hdfs dfs -ls /
hdfs dfs -ls /hive/warehouse
hdfs dfs -cat /hive/warehouse/user_info/user_info.txt

hadoop fs -mkdir /hive_input_data
vi /root/hive/user_info.txt
1001	zhangsan
1002	lisi
1003	wangwu
1004	liuliu
1005	qiqi
hadoop fs -put /root/hive/user_info.txt /hive_input_data
hdfs dfs -ls /hive_input_data
# hdfs dfs -chmod 777 /hive_input_data

> select * from user_info;
> load data inpath '/hive_input_data/user_info.txt' overwrite into table user_info;
> select * from user_info;

# ==================================================================node1
# Stop the processes that were started
$HBASE_HOME/bin/stop-hbase.sh
$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# Stop zookeeper
zkServer.sh stop

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# Snapshot: hive_hiveserver2 cluster
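# For later restarts, a one-shot connectivity check against the ZooKeeper-discovered hiveserver2 (a sketch; run while the cluster is up, using the same root/123456 login as above):
$HIVE_HOME/bin/beeline -u "jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" -n root -p 123456 -e "show databases;"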