
SparkSQL (3): Accessing Hive Tables via JDBC

I. Goal:

Use JDBC to access Hive tables through the Spark Thrift Server.

II. Steps:

1. Prerequisite

Start the Thrift server:

sbin/start-thriftserver.sh \
--master local[2] \
--jars /opt/datas/mysql-connector-java-5.1.27-bin.jar \
--hiveconf hive.server2.thrift.port=14000
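
Once the server is up, you can sanity-check it with the beeline client that ships with Spark before writing any code. The port and username below simply mirror the settings used in this post; adjust them to your environment:

bin/beeline -u jdbc:hive2://bigdata.ibeifeng.com:14000 -n bigdata.ibeifeng.com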

2. Add the hive-jdbc dependency

    <dependency>
      <groupId>org.spark-project.hive</groupId>
      <artifactId>hive-jdbc</artifactId>
      <version>0.13.1</version>
    </dependency>
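
If your project uses sbt rather than Maven, the same artifact can presumably be pulled in with the equivalent coordinate:

libraryDependencies += "org.spark-project.hive" % "hive-jdbc" % "0.13.1"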

3. Code

package SparkSQL

import java.sql.DriverManager

/**
  * Access a Hive table through the Spark Thrift Server via JDBC
  */
object SparkSQLThriftServerApp {
  def main(args: Array[String]): Unit = {

    // Register the Hive JDBC driver
    Class.forName("org.apache.hive.jdbc.HiveDriver")

    // Connect to the Thrift server started above on port 14000
    val conn = DriverManager.getConnection("jdbc:hive2://bigdata.ibeifeng.com:14000", "bigdata.ibeifeng.com", "")
    val pstmt = conn.prepareStatement("select empno,ename,sal from imooc.emp")
    val rs = pstmt.executeQuery()

    // Print each row of the result set
    while (rs.next()) {
      println("empno:" + rs.getInt("empno") + ",ename:" + rs.getString("ename") +
        ",sal:" + rs.getDouble("sal"))
    }

    // Release the JDBC resources
    rs.close()
    pstmt.close()
    conn.close()
  }
}
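
The code above closes its resources only on the happy path. A slightly more defensive variant (just a sketch, reusing the same URL, username, and table as above) wraps the JDBC objects in try/finally so the connection is released even if the query fails:

package SparkSQL

import java.sql.DriverManager

object SparkSQLThriftServerSafeApp {
  def main(args: Array[String]): Unit = {
    Class.forName("org.apache.hive.jdbc.HiveDriver")

    val conn = DriverManager.getConnection(
      "jdbc:hive2://bigdata.ibeifeng.com:14000", "bigdata.ibeifeng.com", "")
    try {
      val pstmt = conn.prepareStatement("select empno,ename,sal from imooc.emp")
      try {
        val rs = pstmt.executeQuery()
        while (rs.next()) {
          println("empno:" + rs.getInt("empno") + ",ename:" + rs.getString("ename") +
            ",sal:" + rs.getDouble("sal"))
        }
        rs.close()
      } finally {
        pstmt.close() // closed even if executeQuery throws
      }
    } finally {
      conn.close() // the connection is always released
    }
  }
}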