Hadoop Ecosystem Setup (3 Nodes) - 17. Sqoop Configuration (Single Node)
# ================================================================== Install Sqoop
tar -zxvf ~/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
mv ~/sqoop-1.4.7.bin__hadoop-2.6.0 /usr/local/sqoop-1.4.7
# Environment variables
# ==================================================================node1
vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4
export SQOOP_HOME=/usr/local/sqoop-1.4.7

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin:$SQOOP_HOME/bin

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# Apply the environment variables
source /etc/profile

# Verify the configuration
echo $SQOOP_HOME
cp -a $SQOOP_HOME/conf/sqoop-env-template.sh $SQOOP_HOME/conf/sqoop-env.sh

vi $SQOOP_HOME/conf/sqoop-env.sh

#Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=${HADOOP_HOME}

#Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=${HADOOP_HOME}

#set the path to where bin/hbase is available
#export HBASE_HOME=${HBASE_HOME}

#Set the path to where bin/hive is available
export HIVE_HOME=${HIVE_HOME}

#Set the path for where zookeper config dir is
#export ZOOCFGDIR=

# Copy the MySQL JDBC driver and the Hive exec jar into Sqoop's lib directory
cp -a ~/mysql-connector-java-5.1.46.jar $SQOOP_HOME/lib/
cp -a $HIVE_HOME/lib/hive-exec-2.1.1.jar $SQOOP_HOME/lib/

# Verify the installation
sqoop-version
mysql -u root -p

> create database sqooptest character set utf8;
> create user 'sqoop'@'%' identified by 'Sqoop-123';
> grant all privileges on *.* to 'sqoop'@'%';
> flush privileges;
> show databases;
> quit;
# Log in to MySQL as the sqoop user
mysql -u sqoop -p
Enter password: Sqoop-123

> use sqooptest;
> create table emp(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT);
> insert into emp(id, name, age) values(1, 'zhangsan', 11);
> insert into emp(id, name, age) values(2, 'lisi', 12);
> insert into emp(id, name, age) values(3, '王五', 13);
> create table emp_add(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT, sex VARCHAR(20));
> insert into emp_add(id, name, age, sex) values(1, 'zhangsan', 11, '男');
> insert into emp_add(id, name, age, sex) values(2, 'lisi', 12, '男');
> insert into emp_add(id, name, age, sex) values(3, '王五', 13, '女');
> insert into emp_add(id, name, age, sex) values(4, 'liuliu', 11, '男');
> show tables;
> select * from emp;
> select * from emp_add;
> quit;
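# Before running any imports, it is worth confirming that Sqoop can reach MySQL through the JDBC driver
# with the new account. This check is not part of the original walkthrough, just an optional sanity test.

# Optional: list the tables Sqoop sees in sqooptest (should print emp and emp_add)
sqoop list-tables \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123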
# Start Hadoop
# ==================================================================node1 node2 node3

zkServer.sh start

# ==================================================================node1

# Start all Hadoop daemons
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2

$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# Check HDFS safe mode status
hadoop dfsadmin -safemode get

# Force HDFS to leave safe mode if necessary
# hadoop dfsadmin -safemode leave

# Web UIs
# http://node1:50070?user.name=hadoop
# http://node2:50070?user.name=hadoop
# http://node1:8088/cluster/nodes?user.name=hadoop
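# As an optional sanity check (not part of the original steps), jps on each node should list the running
# daemons; the exact set depends on the HA layout configured in the earlier parts of this series.

# Optional: verify the running daemons on each node
jps
# Expect NameNode / DFSZKFailoverController / ResourceManager on the master nodes, and
# DataNode / NodeManager / QuorumPeerMain on the nodes where they were configured.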
# Import table data into HDFS
# ==================================================================node1
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1
# If the import fails with the error below, adjust $HADOOP_HOME/etc/hadoop/yarn-site.xml
# ERROR tool.ImportTool: Import failed: java.io.IOException: org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException:
# Invalid resource request, requested memory < 0, or requested memory > max configured, requestedMemory=1536, maxMemory=1024
# ==================================================================node1 node2 node3

vi $HADOOP_HOME/etc/hadoop/yarn-site.xml

<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>2048</value>
</property>
<property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>2048</value>
</property>
# Restart YARN so the new memory settings take effect
# ==================================================================node1
$HADOOP_HOME/sbin/stop-yarn.sh
$HADOOP_HOME/sbin/start-yarn.sh
# Import table data into HDFS
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
--password Sqoop-123 \
--table emp \
--m 1

# View the imported data
hadoop fs -ls /user/root/emp
hadoop fs -cat /user/root/emp/part-m-00000

# Import a table into a specified HDFS directory
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--target-dir /sqooptest/table_emp/queryresult \
--table emp --num-mappers 1

# View the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000
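# The examples above pass --password on the command line, which leaves the password in the shell history
# and process list. Sqoop also accepts -P (interactive prompt) or --password-file; a sketch of the same
# import using a prompt instead:

# Same import, prompting for the database password instead of passing it inline
sqoop import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop \
-P \
--table emp \
--m 1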
# Import a relational table into Hive
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1
# If the error below occurs, delete /user/root/emp first
# ERROR tool.ImportTool: Import failed: org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://appcluster/user/root/emp already exists
# -rmr is deprecated in Hadoop 2.x; hadoop fs -rm -r /user/root/emp is equivalent
hadoop fs -rmr /user/root/emp
# Re-import the relational table into Hive
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp --hive-import \
--num-mappers 1
# Start Hive
hive
# View the imported data
> show tables;
> select * from emp;
# Import a table into a specified Hive database and table
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1
# The Hive database sqooptest must be created first, otherwise the import fails with: FAILED: SemanticException [Error 10072]: Database does not exist: sqooptest
# In the Hive shell
> create database sqooptest;
> show databases;
# Import the table into the specified Hive database and table
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--delete-target-dir \
--fields-terminated-by '\t' \
--hive-import \
--hive-database sqooptest \
--hive-table hive_emp \
--num-mappers 1

# Verify in the Hive shell
> use sqooptest;
> show tables;
> select * from hive_emp;
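# To see where the Hive-imported data lands in HDFS, the warehouse directory can be inspected. The path
# below assumes the default hive.metastore.warehouse.dir of /user/hive/warehouse; adjust it if the Hive
# setup earlier in this series overrides that location.

# Optional check (assumes the default Hive warehouse path)
hadoop fs -ls /user/hive/warehouse/sqooptest.db/hive_emp
hadoop fs -cat /user/hive/warehouse/sqooptest.db/hive_emp/part-m-00000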
# Import a subset of table data with a where clause
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp_add \
--where "age =11" \
--target-dir /sqooptest/table_emp/queryresult2 \
--num-mappers 1

# View the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult2
hadoop fs -cat /sqooptest/table_emp/queryresult2/part-m-00000
# Free-form query import
# $CONDITIONS is a placeholder that Sqoop replaces with each mapper's split condition; it is required in --query imports
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--query 'select id,name,age from emp WHERE id>=2 and $CONDITIONS' \
--split-by id \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult3 \
--num-mappers 1

# View the imported data
hadoop fs -cat /sqooptest/table_emp/queryresult3/part-m-00000
# Incremental import
sqoop import --connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--incremental append \
--check-column id \
--last-value 2 \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult4 \
--num-mappers 1

# View the imported data
hadoop fs -ls /sqooptest/table_emp/queryresult4/
hadoop fs -cat /sqooptest/table_emp/queryresult4/part-m-00000
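# For recurring incremental loads, Sqoop can store the import definition and the last imported value as a
# saved job, so --last-value does not have to be tracked by hand. A minimal sketch, not from the original
# guide: the job name inc_emp is an arbitrary choice, and sqoop job may prompt for the database password
# at execution time.

# Create a saved job that remembers last-value between runs
sqoop job --create inc_emp -- import \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table emp \
--incremental append \
--check-column id \
--last-value 2 \
--fields-terminated-by '\t' \
--target-dir /sqooptest/table_emp/queryresult4 \
--num-mappers 1

# List and run the saved job
sqoop job --list
sqoop job --exec inc_emp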
# Exporting data with Sqoop
# View the HDFS data
hadoop fs -ls /sqooptest/table_emp/queryresult
hadoop fs -cat /sqooptest/table_emp/queryresult/part-m-00000
# 1. First, manually create the target table in MySQL
mysql -u sqoop -p
Enter password: Sqoop-123

> use sqooptest;
> CREATE TABLE employee(id INT NOT NULL PRIMARY KEY, name VARCHAR(20), age INT);
> show tables;
> select * from employee;
# 2. Then run the export command
sqoop export \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table employee \
--export-dir /sqooptest/table_emp/queryresult/

# Verify in the MySQL command line
> select * from employee;
> quit;
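# Re-running this export against a table that already contains the rows fails on duplicate primary keys,
# because a plain export issues INSERT statements. Sqoop's update mode turns the export into an upsert;
# a sketch against the same employee table, using the standard --update-key / --update-mode export options:

# Re-export as an upsert: update existing rows by id, insert new ones
sqoop export \
--connect jdbc:mysql://node1:3306/sqooptest \
--username sqoop --password Sqoop-123 \
--table employee \
--update-key id \
--update-mode allowinsert \
--export-dir /sqooptest/table_emp/queryresult/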
shutdown -h now