spark--環境搭建--5.kafka_2.9.2-0.8.1集群搭建

spark--環境搭建--5.kafka_2.9.2-0.8.1集群搭建

標籤：scala、kafka、install、搭建、/usr/local

1. scala安裝

$ cd /usr/local

$ tar -zxvf scala-2.11.4.tgz

$ mv scala-2.11.4 scala

$ vi ~/.bashrc

export SCALA_HOME=/usr/local/scala/
export PATH=$PATH:$SCALA_HOME/bin/

$ source ~/.bashrc

$ scala -version

$ scp -r scala root@spark2:/usr/local/

$ scp -r scala root@spark3:/usr/local/

$ scp ~/.bashrc root@spark2:~/.bashrc

$ scp ~/.bashrc root@spark3:~/.bashrc

# 2和3分別執行 source ~/.bashrc

2. kafka安裝

$ tar -zxvf kafka_2.9.2-0.8.1.tgz

$ mv kafka_2.9.2-0.8.1 kafka

$ vi kafka/config/server.properties

# 修改以下兩項（注意是兩行獨立的配置，.properties 文件中 # 註釋需單獨成行）
# broker.id 在 spark2 和 spark3 機器上分別修改為 1 和 2
broker.id=0
zookeeper.connect=192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181

$ yum install unzip

$ unzip slf4j-1.7.6.zip

$ cp slf4j-1.7.6/slf4j-nop-1.7.6.jar kafka/libs/

$ rm -rf slf4j-1.7.6

$ scp -r kafka root@spark2:/usr/local/

$ scp -r kafka root@spark3:/usr/local/

# 註意修改2和3的server.properties的配置

3. 啟動kafka集群

$ cd kafka

$ nohup bin/kafka-server-start.sh config/server.properties &

$ jps

# 若沒啟動查看

$ cat nohup.out

# 解決kafka Unrecognized VM option 'UseCompressedOops' 問題（三臺機器分別執行）

$ vi bin/kafka-run-class.sh

# 找到下面這段（原文以紅色標註需刪除的部分），刪除其中的 -XX:+UseCompressedOops 選項即可：
#
# # JVM performance options
# if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
#   KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseCompressedOops -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
# fi

$ nohup bin/kafka-server-start.sh config/server.properties &

4. 測試kafka集群

$ bin/kafka-topics.sh --zookeeper 192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181 --topic TestTopic --replication-factor 1 --partitions 1 --create

$ bin/kafka-console-producer.sh --broker-list 192.168.2.100:9092,192.168.2.101:9092,192.168.2.102:9092 --topic TestTopic

$ bin/kafka-console-consumer.sh --zookeeper 192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181 --topic TestTopic --from-beginning

spark--環境搭建--5.kafka_2.9.2-0.8.1集群搭建