hadoop生態搭建(3節點)-11.storm配置
阿新 • 發佈:2018-10-12
zkCli.sh tmp 1.0 mysq spark 2-0 host 變量 mail
# http://archive.apache.org/dist/storm/apache-storm-1.1.0/
# ================================================================== install storm
# Unpack the release into /usr/local, rename it to the versioned target path,
# then delete the archive.
tar -zxvf ~/apache-storm-1.1.0.tar.gz -C /usr/local
mv /usr/local/apache-storm-1.1.0 /usr/local/storm-1.1.0
# Original line used a Unicode en-dash ("rm –r"), which rm rejects as an
# invalid option; the archive is a plain file, so -f is the right flag anyway.
rm -f -- ~/apache-storm-1.1.0.tar.gz
# environment variables
# ================================================================== node1 node2 node3
vi /etc/profile

# Append the following below the existing line
# "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL":
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$STORM_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# ================================================================== node1
# Reload the profile so the new variables take effect in this shell.
source /etc/profile
# Verify the result — check STORM_HOME, the variable this article adds.
# (The original echoed FLUME_HOME, copy-paste residue from the flume article.)
echo "$STORM_HOME"
# ================================================================== node1

# JVM location and conf dir for the Storm daemons.
vi $STORM_HOME/conf/storm-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_111
export STORM_CONF_DIR=/usr/local/storm-1.1.0/conf

# JVM location for the Python CLI wrapper.
vi $STORM_HOME/conf/storm_env.ini
JAVA_HOME:/usr/java/jdk1.8.0_111

# Main cluster configuration.
vi $STORM_HOME/conf/storm.yaml
storm.zookeeper.servers:
    - "node1"
    - "node2"
    - "node3"
# Nimbus H/A
nimbus.seeds: ["node1", "node2"]
storm.zookeeper.port: 2181
storm.zookeeper.root: "/storm"
# port changed to avoid clashing with the spark cluster ports
storm.exhibitor.port: 9090
# by default a worker JVM gets at most 768M; raise it to 1024m
worker.childopts: "-Xmx1024m"
# port changed to avoid clashing with the spark cluster ports
ui.port: 9090
# NOTE(review): ui.port and storm.exhibitor.port are both 9090 — confirm the
# UI and Exhibitor never share a host, or choose distinct ports.
# Nimbus dir
storm.local.dir: "/usr/local/storm-1.1.0/tmp"
# supervisor worker ports, 4 workers
supervisor.slots.ports:
    - 6700
    - 6701
    - 6702
    - 6703
# Push the configured installation to the other nodes.
# Quote the expansion so a path containing spaces cannot word-split. (SC2086)
scp -r "$STORM_HOME" node2:/usr/local/
scp -r "$STORM_HOME" node3:/usr/local/
# ================================================================== node2 node3
# Reload the profile on the remaining nodes.
source /etc/profile
# Verify the result — check STORM_HOME, the variable this article adds.
# (The original echoed FLUME_HOME, copy-paste residue from the flume article.)
echo "$STORM_HOME"
# startup
# ================================================================== node1 node2 node3
# start zookeeper
zkServer.sh start

# ================================================================== node1
# The path /storm must be created in ZooKeeper by hand; connect to any
# ZooKeeper server with zkCli.sh, then run the create command inside it.
# (The original used curly quotes ‘‘, which the shell passes through literally
# instead of producing an empty-string node value — use straight quotes.)
zkCli.sh
create /storm ''

# ================================================================== node1 node2
# start nimbus
$STORM_HOME/bin/storm nimbus >/dev/null 2>&1 &

# ================================================================== node1 node2 node3
# start supervisor
$STORM_HOME/bin/storm supervisor >/dev/null 2>&1 &

# ================================================================== node1 node2
# start ui
$STORM_HOME/bin/storm ui >/dev/null 2>&1 &

# ================================================================== node1 node2 node3
# start logviewer
$STORM_HOME/bin/storm logviewer >/dev/null 2>&1 &

# Web UI: http://node1:9090

shutdown -h now   # snapshot: storm
hadoop生態搭建(3節點)-11.storm配置