
Installing a Hadoop / ZooKeeper / HBase / MySQL / Hive / Kylin cluster, with troubleshooting notes

Hadoop installation


vi /etc/sysconfig/network-scripts/ifcfg-ens33
```
IPADDR=192.168.182.8
```

vi /etc/hosts
```
192.168.182.8 hd1
192.168.182.9 hd2
192.168.182.10 hd3
```

vi /etc/hostname
```
hd1
```

ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa # generate a key pair
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys # append the public key to authorized_keys
reboot # reboot
mkdir /usr/local/hadoop/ # create the hadoop directory
cd /usr/local/hadoop/ # enter it
wget https://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.9.2/hadoop-2.9.2.tar.gz # download Hadoop
tar -zvxf hadoop-2.9.2.tar.gz # extract into the current directory

vi /etc/profile
```
export JAVA_HOME=/home/fleam/jdk1.8.0_191
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
```
source /etc/profile
hadoop version # verify the setup
echo $JAVA_HOME # expect /home/fleam/jdk1.8.0_191

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hadoop-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-env.sh
```
export JAVA_HOME=/home/fleam/jdk1.8.0_191
```

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hdfs-site.xml
```
<configuration>
	<!-- where the NameNode stores its metadata -->
	<property>
		<name>dfs.namenode.name.dir</name>
		<value>/usr/data/hadoop/namenode</value>
	</property>
	<!-- where the DataNode stores its blocks -->
	<property>
		<name>dfs.datanode.data.dir</name>
		<value>/usr/data/hadoop/datanode</value>
	</property>
	<!-- where the JournalNode keeps its data on local disk -->
	<property>
		<name>dfs.journalnode.edits.dir</name>
		<value>/usr/data/hadoop/journalnode</value>
	</property>
	<!-- number of replicas HDFS keeps for each block -->
	<property>
		<name>dfs.replication</name>
		<value>3</value>
	</property>
	<property>
		<name>dfs.permissions.enabled</name>
		<value>false</value>
	</property>
	<property>
		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
		<value>false</value>
	</property>
</configuration>
```
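
A quick sanity check before moving on: a typo in any of these XML files makes the daemons die at startup with unhelpful stack traces. A minimal sketch, assuming `xmllint` (part of the libxml2 package on CentOS 7) is installed:

```
# verify every Hadoop config file is at least well-formed XML
for f in /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/*-site.xml; do
    xmllint --noout "$f" && echo "$f OK"
done
```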

mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/core-site.xml
```
<configuration>
	<!-- NameNode address -->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://192.168.182.8:9000</value>
	</property>
	<!-- directory for files Hadoop generates at runtime -->
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/usr/data/hadoop/tmp</value>
	</property>
	<!-- ZooKeeper quorum; leave this commented out for now. It is added later,
	     when configuring HBase, once ZooKeeper has been installed. -->
	<!--
	<property>
		<name>ha.zookeeper.quorum</name>
		<value>192.168.182.8:2181,192.168.182.9:2181,192.168.182.10:2181</value>
	</property>
	-->
</configuration>
```

cp /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml
```
<configuration>
	<!-- run MapReduce on YARN -->
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
</configuration>
```

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-site.xml
```
<configuration>
	<property>
		<name>yarn.resourcemanager.address</name>
		<value>192.168.182.8:8032</value>
	</property>
	<property>
		<name>yarn.resourcemanager.scheduler.address</name>
		<value>192.168.182.8:8030</value>
	</property>
	<property>
		<name>yarn.resourcemanager.resource-tracker.address</name>
		<value>192.168.182.8:8031</value>
	</property>
	<property>
		<name>yarn.resourcemanager.admin.address</name>
		<value>192.168.182.8:8033</value>
	</property>
	<property>
		<name>yarn.resourcemanager.webapp.address</name>
		<value>192.168.182.8:8088</value>
	</property>
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
		<value>org.apache.hadoop.mapred.ShuffleHandler</value>
	</property>
</configuration>
```

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/slaves
```
192.168.182.8
192.168.182.9
192.168.182.10
```

# Clone hd1 to create hd2 and hd3

vi /etc/sysconfig/network-scripts/ifcfg-ens33 # one value per clone: .9 on hd2, .10 on hd3
```
IPADDR=192.168.182.9
IPADDR=192.168.182.10
```

reboot
ssh [email protected] # test passwordless login
ssh [email protected] # test passwordless login

vi /etc/hostname # hd2 on the second clone, hd3 on the third
```
hd2
hd3
```

# log in to hd1
hadoop namenode -format # format the NameNode (run as root)
start-dfs.sh # start HDFS
start-yarn.sh # start YARN
start-all.sh # or start everything at once (equivalent to the two commands above)

jps # verify on hd1
##
3408 ResourceManager
3235 SecondaryNameNode
3996 Jps
2973 NameNode
3501 NodeManager
3070 DataNode
##

jps # verify on hd2 and hd3
##
1797 Jps
1638 NodeManager
1532 DataNode
##
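
With every daemon accounted for, a short write/read round-trip confirms HDFS works end to end; a minimal sketch (file and path names are arbitrary):

```
echo "hello hdfs" > /tmp/smoke.txt
hadoop fs -mkdir -p /smoke
hadoop fs -put /tmp/smoke.txt /smoke/
hadoop fs -cat /smoke/smoke.txt # should print: hello hdfs
hadoop fs -rm -r /smoke # clean up
```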

# To re-format from scratch: wipe the data directories on hd1, hd2 and hd3
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode

# hd1
hdfs namenode -format
start-all.sh

# Troubleshooting
stop-all.sh # stop everything
cd /usr/local/hadoop/hadoop-2.9.2/logs # the logs live here

# check whether the port is already taken
netstat -tunlp | grep 9000 # who is listening on port 9000
lsof -i:9000 # which process holds port 9000
ps -ef | grep clickhouse # in my case, ClickHouse had taken port 9000
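
If another service owns the port (ClickHouse listens on 9000 by default), either stop it or move the NameNode elsewhere. A sketch, assuming ClickHouse runs as the stock systemd service; adjust the service name if yours differs:

```
systemctl stop clickhouse-server # free port 9000
systemctl disable clickhouse-server # keep it from grabbing the port again at boot
# alternatively: change the port in fs.defaultFS in core-site.xml, and keep
# hbase.rootdir (configured later) pointing at the same host:port
```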


ZooKeeper installation

# on all nodes
wget http://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
tar -zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /home/bigData

vi /etc/profile
```
export ZOOKEEPER_HOME=/home/bigData/zookeeper-3.4.14
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
```
source /etc/profile

cp /home/bigData/zookeeper-3.4.14/conf/zoo_sample.cfg /home/bigData/zookeeper-3.4.14/conf/zoo.cfg
vim /home/bigData/zookeeper-3.4.14/conf/zoo.cfg
```
dataDir=/usr/data/zookeeper/data
dataLogDir=/usr/data/zookeeper/logs
# ZooKeeper ensemble members; append these at the end of the file
server.1=192.168.182.8:2888:3888
server.2=192.168.182.9:2888:3888
server.3=192.168.182.10:2888:3888
```

mkdir -p /usr/data/zookeeper/data
mkdir -p /usr/data/zookeeper/logs
cd /usr/data/zookeeper/data
touch myid
vi myid # one value per node: 1 on hd1, 2 on hd2, 3 on hd3
```
1
2
3
```
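
The same thing non-interactively; run the matching line on each node:

```
echo 1 > /usr/data/zookeeper/data/myid # on hd1
echo 2 > /usr/data/zookeeper/data/myid # on hd2
echo 3 > /usr/data/zookeeper/data/myid # on hd3
```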

zkServer.sh start
zkServer.sh status
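
Once all three nodes are started, `status` should report one leader and two followers; the output looks roughly like this (which node wins the election varies):

```
ZooKeeper JMX enabled by default
Using config: /home/bigData/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: follower   # "Mode: leader" on exactly one node
```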

HBase installation

# hd1
wget https://mirror.bit.edu.cn/apache/hbase/1.4.13/hbase-1.4.13-bin.tar.gz
tar -zxvf hbase-1.4.13-bin.tar.gz
yum install -y ntpdate # on all nodes (HBase needs the cluster clocks in sync)
mkdir /usr/local/hbase
mv hbase-1.4.13 /usr/local/hbase/ # /usr/local/hbase/hbase-1.4.13

vi /etc/profile
```
export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin
```
source /etc/profile

vi /usr/local/hbase/hbase-1.4.13/conf/hbase-env.sh
```
export JAVA_HOME=/home/fleam/jdk1.8.0_191
export HBASE_MANAGES_ZK=false # use the external ZooKeeper cluster; HBase will not start/stop its own
export HBASE_CLASSPATH=$HBASE_CLASSPATH:/usr/local/hbase/hbase-1.4.13/conf:/usr/local/hbase/hbase-1.4.13/lib:/usr/local/hadoop/hadoop-2.9.2/etc/hadoop/
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m" # comment out: PermSize flags were removed in JDK 8
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m" # comment out
```

vi /usr/local/hbase/hbase-1.4.13/conf/hbase-site.xml
```
<configuration>
	<!-- must match fs.defaultFS in core-site.xml (port 9000 here, not the 8020 default) -->
	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://192.168.182.8:9000/hbase</value>
	</property>
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
	<property>
		<name>hbase.tmp.dir</name>
		<value>/usr/data/hbase/tmp</value>
	</property>
	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>192.168.182.8,192.168.182.9,192.168.182.10</value>
	</property>
	<property>
		<name>hbase.zookeeper.property.clientPort</name>
		<value>2181</value>
	</property>
</configuration>
```

mkdir -p /usr/data/hbase/tmp

vi /usr/local/hbase/hbase-1.4.13/conf/regionservers
```
192.168.182.8
192.168.182.9
192.168.182.10
```

scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.9:/usr/local/hbase/hbase-1.4.13/
scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.10:/usr/local/hbase/hbase-1.4.13/

# on the remaining nodes
vi /etc/profile
```
export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin
```
source /etc/profile

# clean start: run on all nodes
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
rm -rf /usr/local/hbase/hbase-1.4.13/logs/*
rm -rf /usr/data/hbase/tmp
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode
mkdir -p /usr/data/hbase/tmp
zkServer.sh start
zkServer.sh status

# on the master node
zkCli.sh -server hd1
rmr /hbase # remove the stale /hbase znode
hdfs namenode -format
start-all.sh
start-hbase.sh
curl http://192.168.182.8:16010
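
If the web UI on 16010 responds, the master is up; the shell gives a quicker yes/no from the terminal (`status` is a built-in hbase shell command):

```
echo "status" | hbase shell # expect something like: 1 active master, 0 backup masters, 3 servers
```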

MySQL installation

rpm -qa|grep mariadb
rpm -e mariadb-libs-5.5.65-1.el7.x86_64 --nodeps
wget http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-community-server
systemctl restart mysqld.service
mysql -u root 
set password for 'root'@'localhost' =password('root');
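
The Hive JDBC URL used below has `createDatabaseIfNotExist=true`, so the metastore database appears on first use, but creating it up front makes the schematool step later fail less mysteriously. A sketch, assuming the root/root credentials just set:

```
mysql -u root -proot <<'SQL'
CREATE DATABASE IF NOT EXISTS hive_db;
GRANT ALL PRIVILEGES ON hive_db.* TO 'root'@'localhost' IDENTIFIED BY 'root';
FLUSH PRIVILEGES;
SQL
```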

Hive installation

wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-2.3.7/apache-hive-2.3.7-bin.tar.gz
tar -zxvf apache-hive-2.3.7-bin.tar.gz
mkdir -p /usr/local/hive/
mv apache-hive-2.3.7-bin  /usr/local/hive/
vi /etc/profile
```
export HIVE_HOME=/usr/local/hive/apache-hive-2.3.7-bin
export PATH=$PATH:$HIVE_HOME/bin
```

source /etc/profile
hive --version
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh
```
HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2 # Hadoop path
export HIVE_CONF_DIR=/usr/local/hive/apache-hive-2.3.7-bin/conf # Hive conf path
export HIVE_AUX_JARS_PATH=/usr/local/hive/apache-hive-2.3.7-bin/lib # Hive jars path
export JAVA_HOME=/home/fleam/jdk1.8.0_191 # JDK install path
```

cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-default.xml.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml
```
	<property>
		<name>hive.metastore.warehouse.dir</name>
		<value>/opt/hive/warehouse</value>
	</property>
	<property>
		<name>hive.metastore.local</name>
		<value>true</value>
	</property>
	<!-- JDBC URL of the MySQL metastore database -->
	<!-- for a remote MySQL instance, put its IP or hostname here instead of localhost -->
	<!-- hive_db is created automatically if it does not exist -->
	<property>
		<name>javax.jdo.option.ConnectionURL</name>
		<value>jdbc:mysql://localhost:3306/hive_db?createDatabaseIfNotExist=true</value>
	</property>
	<!-- JDBC driver -->
	<property>
		<name>javax.jdo.option.ConnectionDriverName</name>
		<value>com.mysql.jdbc.Driver</value>
	</property>
	<!-- MySQL username -->
	<property>
		<name>javax.jdo.option.ConnectionUserName</name>
		<value>root</value>
	</property>
	<!-- MySQL password -->
	<property>
		<name>javax.jdo.option.ConnectionPassword</name>
		<value>root</value>
	</property>
```
# comment out every derby-related setting, and change these values:
# hive.querylog.location => /usr/hive/tmp/root
# hive.server2.logging.operation.log.location => /home/hive/root/operation_logs
# hive.exec.local.scratchdir => /home/hive/root
# hive.downloaded.resources.dir => /home/hive/${hive.session.id}_resources
mkdir -p /usr/hive/tmp/root
mkdir -p /home/hive/root/operation_logs

cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties

wget https://cdn.mysql.com/archives/mysql-connector-java-5.1/mysql-connector-java-5.1.48.tar.gz
tar -zxvf mysql-connector-java-5.1.48.tar.gz
cp /home/mysql-connector-java-5.1.48/mysql-connector-java-5.1.48-bin.jar /usr/local/hive/apache-hive-2.3.7-bin/lib

zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
start-all.sh
start-hbase.sh

schematool -initSchema -dbType mysql
##
schemaTool completed
##
hive
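
If `schemaTool completed` shows up and `hive` opens a prompt, a tiny round-trip proves the metastore and the HDFS warehouse both work; a minimal sketch (table name arbitrary; the INSERT launches a MapReduce job, so it takes a moment):

```
hive -e "
CREATE TABLE IF NOT EXISTS smoke_test (id INT, name STRING);
SHOW TABLES;
INSERT INTO smoke_test VALUES (1, 'ok');
SELECT * FROM smoke_test;
DROP TABLE smoke_test;
"
```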

Kylin installation

wget https://mirror.bit.edu.cn/apache/kylin/apache-kylin-3.1.1/apache-kylin-3.1.1-bin-hbase1x.tar.gz
mkdir -p /usr/local/kylin/
tar -zxvf apache-kylin-3.1.1-bin-hbase1x.tar.gz -C /usr/local/kylin/
vi /etc/profile
```
export KYLIN_HOME=/usr/local/kylin/apache-kylin-3.1.1-bin-hbase1x
export PATH=$PATH:$KYLIN_HOME/bin
```
source /etc/profile
sh $KYLIN_HOME/bin/check-env.sh

1. On all nodes
zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
2. On the master node
start-all.sh
start-hbase.sh
nohup hive --service metastore &
nohup hive --service hiveserver2 &
mr-jobhistory-daemon.sh start historyserver
kylin.sh start

http://192.168.182.8:7070/kylin
Default username: ADMIN
Default password: KYLIN
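
Kylin can take a few minutes to come up; a headless check against its REST login endpoint saves a browser round-trip. A sketch (`QURNSU46S1lMSU4=` is just base64 of `ADMIN:KYLIN`):

```
curl -s -X POST -H "Authorization: Basic QURNSU46S1lMSU4=" \
  http://192.168.182.8:7070/kylin/api/user/authentication
# a JSON description of the ADMIN user means the login worked
```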

HDFS

hadoop fs -ls /
hadoop fs -mkdir /input # "hadoop dfs" is deprecated; use "hadoop fs"
hadoop fs -put 1.txt /input
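
Reading it back:

```
hadoop fs -ls /input # confirm the upload
hadoop fs -cat /input/1.txt # print the file
hadoop fs -get /input/1.txt . # copy it back to the local working directory
```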

Hive

beeline
!connect jdbc:hive2://127.0.0.1:10000
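
The same connection as a one-liner, handy for scripts (the -n user depends on how HiveServer2 authenticates; with the default NONE, any name works):

```
beeline -u jdbc:hive2://127.0.0.1:10000 -n root -e "SHOW DATABASES;"
```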

HBase

hbase shell
list # list all tables visible to the current user
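
And a minimal create/put/scan round-trip, fed to the shell non-interactively (table and column family names are arbitrary):

```
hbase shell <<'EOF'
create 't1', 'cf'
put 't1', 'row1', 'cf:a', '1'
scan 't1'
disable 't1'
drop 't1'
EOF
```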