Spark + Kafka + Spark Streaming example
-------------------------------------------------------AdClickedStreamingStatus -------------------------------------------------------------------------------------------------
import java.sql.Connection;
import java.sql.DriverManager;import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import com.google.common.base.Optional;
import kafka.serializer.StringDecoder;
import scala.Tuple2;
/**
 * Basic ad-click record format: timestamp, ip, userID, adID, province, city
 */
public class AdClickedStreamingStatus {

    /**
     * Streaming job: consumes ad-click events from Kafka, filters out clicks from
     * blacklisted users, counts clicks per user per batch, flags abusive clickers
     * for the blacklist, and maintains running per-ad click totals persisted to MySQL.
     *
     * Ad-click record format (tab-separated): timestamp, ip, userID, adID, province, city
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setMaster("local[5]")
                //.setMaster("spark://master:7077")
                .setAppName("AdClickedStreamingStats");
        /*SparkConf conf = new SparkConf().setMaster("spark://Master:7077").
        setAppName("SparkStreamingOnKafkaReceiver");*/
        JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(10));
        // Checkpointing is required by updateStateByKey below.
        jsc.checkpoint("d:/checkpoint");
        /*
         * Kafka consumer metadata for the direct (receiver-less) stream.
         */
        Map<String, String> kafkaParameters = new HashMap<String, String>();
        kafkaParameters.put("metadata.broker.list",
                "master:9092,slave1:9092,slave2:9092");
        Set<String> topics = new HashSet<String>();
        topics.add("AdClicked");
        JavaPairInputDStream<String, String> adClickedStreaming = KafkaUtils.createDirectStream(jsc,
                String.class, String.class,
                StringDecoder.class, StringDecoder.class,
                kafkaParameters,
                topics);
        /*
         * Online blacklist filtering. transformToPair is used because the Kafka input
         * is a Pair<String, String> stream and the filtered output must keep that type
         * for the downstream stages.
         *
         * Strategy: load the blacklist from the database as an RDD of (userID, true),
         * leftOuterJoin each batch RDD (re-keyed by userID) against it, and keep only
         * the records whose join result is absent, i.e. users NOT on the blacklist.
         */
        JavaPairDStream<String, String> filteredadClickedStreaming = adClickedStreaming.transformToPair(new Function<JavaPairRDD<String, String>, JavaPairRDD<String, String>>() {
            @Override
            public JavaPairRDD<String, String> call(JavaPairRDD<String, String> rdd) throws Exception {
                List<String> blackListNames = new ArrayList<String>();
                JDBCWrapper jdbcWrapper = JDBCWrapper.getJDBCInstance();
                // Intended blacklist load (currently disabled, so blackListNames stays empty):
                /* jdbcWrapper.doQuery("SELECT * FROM blacklisttable", null, new ExecuteCallBack(){
                    @Override
                    public void resultCallBack(ResultSet result) throws Exception {
                        while(result.next()){
                            blackListNames.add(result.getString(1));
                        }
                    }
                });*/
                List<Tuple2<String, Boolean>> blackListTuple = new ArrayList<Tuple2<String, Boolean>>();
                for (String name : blackListNames) {
                    blackListTuple.add(new Tuple2<String, Boolean>(name, true));
                }
                // Blacklist rows mapped to <userID, true> for the join.
                List<Tuple2<String, Boolean>> blackListFromDB = blackListTuple;
                JavaSparkContext sparkContext = new JavaSparkContext(rdd.context());
                // The blacklist table only holds userIDs; a join needs Key-Value pairs,
                // so build a pair RDD keyed by userID.
                JavaPairRDD<String, Boolean> blackListRDD = sparkContext.parallelizePairs(blackListFromDB);
                /*
                 * Re-key the batch records by userID so they can be joined with the
                 * blacklist. Record format (tab-separated):
                 * timestamp, ip, userID, adID, province, city
                 */
                JavaPairRDD<String, Tuple2<String, String>> rdd2Pair = rdd.mapToPair(new PairFunction<Tuple2<String, String>, String, Tuple2<String, String>>() {
                    @Override
                    public Tuple2<String, Tuple2<String, String>> call(Tuple2<String, String> t) throws Exception {
                        // FIX: the field separator is a tab ("\t"); the original "/t"
                        // literal never matched, so userID extraction was broken.
                        System.out.println(t._2.split("\t")[0]);
                        String userID = t._2.split("\t")[2];
                        System.out.println("userID=" + userID);
                        return new Tuple2<String, Tuple2<String, String>>(userID, t);
                    }
                });
                JavaPairRDD<String, Tuple2<Tuple2<String, String>, Optional<Boolean>>> joined = rdd2Pair.leftOuterJoin(blackListRDD);
                JavaPairRDD<String, String> result = joined.filter(new Function<Tuple2<String,
                        Tuple2<Tuple2<String, String>, Optional<Boolean>>>, Boolean>() {
                    @Override
                    public Boolean call(Tuple2<String, Tuple2<Tuple2<String, String>, Optional<Boolean>>> v1)
                            throws Exception {
                        // Keep the record only when the user is NOT on the blacklist.
                        Optional<Boolean> optional = v1._2._2;
                        if (optional.isPresent() && optional.get()) {
                            return false;
                        } else {
                            return true;
                        }
                    }
                }).mapToPair(new PairFunction<Tuple2<String, Tuple2<Tuple2<String, String>, Optional<Boolean>>>, String, String>() {
                    @Override
                    public Tuple2<String, String> call(
                            Tuple2<String, Tuple2<Tuple2<String, String>, Optional<Boolean>>> t) throws Exception {
                        // Unwrap back to the original (key, rawRecord) pair.
                        return t._2._1;
                    }
                });
                return result;
            }
        });
        /*
         * From here on we program against the DStream just like against RDDs; a DStream
         * is the template from which each batch's RDD operations are generated.
         *
         * Map every surviving click to a composite key with count 1:
         * (timestamp_ip_userID_adID_province_city, 1L).
         */
        JavaPairDStream<String, Long> pairs = filteredadClickedStreaming.mapToPair(new PairFunction<Tuple2<String, String>, String, Long>() {
            @Override
            public Tuple2<String, Long> call(Tuple2<String, String> t) throws Exception {
                // FIX: raw Kafka records are tab-separated (see the mock producer and the
                // updateStateByKey stage below), not "_"-separated; splitting on "_"
                // would throw ArrayIndexOutOfBoundsException on splited[1].
                String[] splited = t._2.split("\t");
                String timestamp = splited[0]; //yyyy-MM-dd
                String ip = splited[1];
                String userID = splited[2];
                String adID = splited[3];
                String province = splited[4];
                String city = splited[5];
                String clickedRecord = timestamp + "_" + ip + "_" + userID + "_" + adID + "_"
                        + province + "_" + city;
                return new Tuple2<String, Long>(clickedRecord, 1L);
            }
        });
        /*
         * Per-batch click count for each (user, ad, ...) composite key.
         */
        JavaPairDStream<String, Long> adClickedUsers = pairs.reduceByKey(new Function2<Long, Long, Long>() {
            @Override
            public Long call(Long v1, Long v2) throws Exception {
                return v1 + v2;
            }
        });
        /**
         * What counts as a "valid" click?
         * 1. The sophisticated approach trains an ML model and filters online.
         * 2. A simple approach thresholds the click count within one batch, but click
         *    fraud mimics real behavior, so a single batch is not conclusive; judging
         *    over a day (or an hour) of data is needed.
         * 3. A pragmatic middle ground: e.g. flag one IP used by many accounts, or a
         *    user clicking the same ad more than 50 times a day.
         *
         * The blacklist is dynamically generated, so every batch must consider new
         * additions; it is persisted in a DB/Redis (here: MySQL).
         */
        JavaPairDStream<String, Long> filteredClickInBatch = adClickedUsers.filter(new Function<Tuple2<String, Long>, Boolean>() {
            @Override
            public Boolean call(Tuple2<String, Long> v1) throws Exception {
                if (1 < v1._2) {
                    // TODO: also update the blacklist table here.
                    return false;
                } else {
                    return true;
                }
            }
        });
        /*
         * print() does not immediately trigger a job: Spark Streaming triggers jobs
         * based on the configured batch duration. An output operation (print,
         * saveAsTextFile, foreachRDD, ...) is required for any job to run at all;
         * foreachRDD is the workhorse for writing results to Redis/DB/dashboards.
         */
        filteredClickInBatch.print();
        filteredClickInBatch.foreachRDD(new Function<JavaPairRDD<String, Long>, Void>() {
            @Override
            public Void call(JavaPairRDD<String, Long> rdd) throws Exception {
                rdd.foreachPartition(new VoidFunction<Iterator<Tuple2<String, Long>>>() {
                    @Override
                    public void call(Iterator<Tuple2<String, Long>> partition) throws Exception {
                        /*
                         * Batch-write the partition into MySQL through the pooled
                         * JDBCWrapper: keys already present get an UPDATE (accumulate),
                         * new keys get an INSERT.
                         */
                        List<UserAdClicked> userAdClickedList = new ArrayList<UserAdClicked>();
                        while (partition.hasNext()) {
                            Tuple2<String, Long> record = partition.next();
                            String[] splited = record._1.split("_");
                            UserAdClicked userClicked = new UserAdClicked();
                            userClicked.setTimestamp(splited[0]);
                            userClicked.setIp(splited[1]);
                            userClicked.setUserID(splited[2]);
                            userClicked.setAdID(splited[3]);
                            userClicked.setProvince(splited[4]);
                            userClicked.setCity(splited[5]);
                            userAdClickedList.add(userClicked);
                        }
                        List<UserAdClicked> inserting = new ArrayList<UserAdClicked>();
                        List<UserAdClicked> updating = new ArrayList<UserAdClicked>();
                        JDBCWrapper jdbcWrapper = JDBCWrapper.getJDBCInstance();
                        // adclicked columns: timestamp, ip, userID, adID, province, city, clickedCount
                        for (UserAdClicked clicked : userAdClickedList) {
                            jdbcWrapper.doQuery("SELECT count(1) FROM adclicked WHERE "
                                    + " timestamp = ? AND userID = ? AND adID = ?",
                                    new Object[]{clicked.getTimestamp(), clicked.getUserID(), clicked.getAdID()},
                                    new ExecuteCallBack() {
                                        @Override
                                        public void resultCallBack(ResultSet result) throws Exception {
                                            // FIX: a COUNT query always returns exactly one row, so
                                            // checking next() alone routed every record to "updating".
                                            // Inspect the count value itself.
                                            // NOTE(review): the value stored here is the matching-row
                                            // count, not an accumulated clickedCount — confirm the
                                            // intended semantics against the table schema.
                                            if (result.next() && result.getLong(1) > 0) {
                                                long count = result.getLong(1);
                                                clicked.setClickedCount(count);
                                                updating.add(clicked);
                                            } else {
                                                inserting.add(clicked);
                                            }
                                        }
                                    });
                        }
                        // adclicked columns: timestamp, ip, userID, adID, province, city, clickedCount
                        ArrayList<Object[]> insertParametersList = new ArrayList<Object[]>();
                        for (UserAdClicked inserRecord : inserting) {
                            insertParametersList.add(new Object[]{
                                    inserRecord.getTimestamp(),
                                    inserRecord.getIp(),
                                    inserRecord.getUserID(),
                                    inserRecord.getAdID(),
                                    inserRecord.getProvince(),
                                    inserRecord.getCity(),
                                    inserRecord.getClickedCount()
                            });
                        }
                        jdbcWrapper.doBatch("INSERT INTO adclicked VALUES(?,?,?,?,?,?,?)", insertParametersList);
                        ArrayList<Object[]> updateParametersList = new ArrayList<Object[]>();
                        for (UserAdClicked updateRecord : updating) {
                            // FIX: the UPDATE statement binds clickedCount FIRST, then the
                            // WHERE columns; the original appended clickedCount last, so
                            // every parameter was bound to the wrong placeholder.
                            updateParametersList.add(new Object[]{
                                    updateRecord.getClickedCount(),
                                    updateRecord.getTimestamp(),
                                    updateRecord.getIp(),
                                    updateRecord.getUserID(),
                                    updateRecord.getAdID(),
                                    updateRecord.getProvince(),
                                    updateRecord.getCity()
                            });
                        }
                        jdbcWrapper.doBatch("UPDATE adclicked set clickedCount = ? WHERE "
                                + " timestamp = ? AND ip = ? AND userID = ? AND adID = ? AND province = ? "
                                + "AND city = ? ", updateParametersList);
                    }
                });
                return null;
            }
        });
        /*
         * Flag users whose historical daily click total exceeds the threshold.
         */
        JavaPairDStream<String, Long> blackListBasedOnHistory = filteredClickInBatch.filter(new Function<Tuple2<String, Long>, Boolean>() {
            @Override
            public Boolean call(Tuple2<String, Long> v1) throws Exception {
                // Record format: timestamp, ip, userID, adID, province, city
                String[] splited = v1._1.split("_");
                String date = splited[0];
                String userID = splited[2];
                String adID = splited[3];
                /*
                 * The total click count should be queried from the click table by
                 * (date, userID, adID); a hard-coded placeholder is used here.
                 * TODO: replace with the real lookup.
                 */
                int clickedCountTotalToday = 81;
                if (clickedCountTotalToday > 50) {
                    return true;
                } else {
                    return false;
                }
            }
        });
        /*
         * Extract the userID of each flagged record; the RDD must then be
         * de-duplicated before writing to the blacklist table.
         */
        JavaDStream<String> blackListuserIDtBasedOnHistory = blackListBasedOnHistory.map(new Function<Tuple2<String, Long>, String>() {
            @Override
            public String call(Tuple2<String, Long> v1) throws Exception {
                return v1._1.split("_")[2];
            }
        });
        JavaDStream<String> blackListUniqueuserIDtBasedOnHistory = blackListuserIDtBasedOnHistory.transform(new Function<JavaRDD<String>, JavaRDD<String>>() {
            @Override
            public JavaRDD<String> call(JavaRDD<String> rdd) throws Exception {
                return rdd.distinct();
            }
        });
        // Persist the (deduplicated) new blacklist members.
        blackListUniqueuserIDtBasedOnHistory.foreachRDD(new Function<JavaRDD<String>, Void>() {
            @Override
            public Void call(JavaRDD<String> rdd) throws Exception {
                rdd.foreachPartition(new VoidFunction<Iterator<String>>() {
                    @Override
                    public void call(Iterator<String> t) throws Exception {
                        /*
                         * Batch-insert the userIDs into the blacklist table.
                         * NOTE(review): userIDs already present in blacklisttable may be
                         * inserted again — confirm a uniqueness constraint exists.
                         */
                        List<Object[]> blackList = new ArrayList<Object[]>();
                        while (t.hasNext()) {
                            blackList.add(new Object[]{(Object) t.next()});
                        }
                        JDBCWrapper jdbcWrapper = JDBCWrapper.getJDBCInstance();
                        jdbcWrapper.doBatch("INSERT INTO blacklisttable VALUES (?) ", blackList);
                    }
                });
                return null;
            }
        });
        /**
         * Running per-ad click totals: updateStateByKey accumulates counts across
         * batch durations; the result is persisted to MySQL.
         */
        filteredadClickedStreaming.mapToPair(new PairFunction<Tuple2<String, String>, String, Long>() {
            @Override
            public Tuple2<String, Long> call(Tuple2<String, String> t) throws Exception {
                String[] splited = t._2.split("\t");
                String timestamp = splited[0]; //yyyy-MM-dd
                String ip = splited[1];
                String userID = splited[2];
                String adID = splited[3];
                String province = splited[4];
                String city = splited[5];
                String clickedRecord = timestamp + "_" + adID + "_"
                        + province + "_" + city;
                return new Tuple2<String, Long>(clickedRecord, 1L);
            }
        }).updateStateByKey(new Function2<List<Long>, Optional<Long>, Optional<Long>>() {
            @Override
            public Optional<Long> call(List<Long> v1, Optional<Long> v2) throws Exception {
                /*
                 * v1: the counts for this key within the current batch, e.g. {1,1,1,1}.
                 * v2: the total accumulated for this key over previous batches.
                 * The new total is v2 plus the sum of v1. updateStateByKey is preferred
                 * over repeated reduceByKey because it avoids per-window shuffles while
                 * accumulating over long periods (a day, a year, ...).
                 */
                Long clickedTotalHistory = 0L;
                if (v2.isPresent()) {
                    clickedTotalHistory = v2.get();
                }
                for (Long one : v1) {
                    clickedTotalHistory += one;
                }
                return Optional.of(clickedTotalHistory);
            }
        }).foreachRDD(new Function<JavaPairRDD<String, Long>, Void>() {
            @Override
            public Void call(JavaPairRDD<String, Long> rdd) throws Exception {
                rdd.foreachPartition(new VoidFunction<Iterator<Tuple2<String, Long>>>() {
                    @Override
                    public void call(Iterator<Tuple2<String, Long>> partition) throws Exception {
                        /*
                         * Batch-write the running totals into MySQL; existing
                         * (timestamp, adID, province, city) keys would need an UPDATE,
                         * new ones an INSERT.
                         */
                        List<AdClicked> adClickedList = new ArrayList<AdClicked>();
                        while (partition.hasNext()) {
                            Tuple2<String, Long> record = partition.next();
                            String[] splited = record._1.split("_");
                            AdClicked adClicked = new AdClicked();
                            adClicked.setTimestamp(splited[0]);
                            adClicked.setAdID(splited[1]);
                            adClicked.setProvince(splited[2]);
                            adClicked.setCity(splited[3]);
                            adClicked.setClickedCount(record._2);
                            adClickedList.add(adClicked);
                        }
                        JDBCWrapper jdbcWrapper = JDBCWrapper.getJDBCInstance();
                        List<AdClicked> inserting = new ArrayList<AdClicked>();
                        List<AdClicked> updating = new ArrayList<AdClicked>();
                        // adclickedcount columns: timestamp, adID, province, city, clickedCount
                        for (AdClicked clicked : adClickedList) {
                            jdbcWrapper.doQuery("SELECT count(1) FROM adclickedcount WHERE "
                                    + " timestamp = ? AND adID = ? AND province = ? AND city = ? ",
                                    new Object[]{clicked.getTimestamp(), clicked.getAdID(), clicked.getProvince(), clicked.getCity()},
                                    new ExecuteCallBack() {
                                        @Override
                                        public void resultCallBack(ResultSet result) throws Exception {
                                            // FIX: same COUNT-query pitfall as above — check the
                                            // count value, not just that a row came back.
                                            if (result.next() && result.getLong(1) > 0) {
                                                long count = result.getLong(1);
                                                clicked.setClickedCount(count);
                                                updating.add(clicked);
                                            } else {
                                                inserting.add(clicked);
                                            }
                                        }
                                    });
                        }
                        ArrayList<Object[]> insertParametersList = new ArrayList<Object[]>();
                        for (AdClicked inserRecord : inserting) {
                            insertParametersList.add(new Object[]{
                                    inserRecord.getTimestamp(),
                                    inserRecord.getAdID(),
                                    inserRecord.getProvince(),
                                    inserRecord.getCity(),
                                    inserRecord.getClickedCount()
                            });
                        }
                        jdbcWrapper.doBatch("INSERT INTO adclickedcount VALUES(?,?,?,?,?)", insertParametersList);
                        // The UPDATE branch for adclickedcount is intentionally disabled.
                        // NOTE(review): if re-enabled, bind clickedCount FIRST to match
                        // the placeholder order (same bug as the adclicked UPDATE above).
                        /* ArrayList<Object[]> updateParametersList = new ArrayList<Object[]>();
                        for(AdClicked updateRecord : updating){
                            updateParametersList.add(new Object[]{
                                updateRecord.getTimestamp(),
                                updateRecord.getAdID(),
                                updateRecord.getProvince(),
                                updateRecord.getCity(),
                                updateRecord.getClickedCount()
                            });
                        }
                        jdbcWrapper.doBatch("UPDATE adclickedcount set clickedCount = ? WHERE "
                            + " timestamp = ? AND adID = ? AND province = ? AND city = ? ", updateParametersList);*/
                    }
                });
                return null;
            }
        });
        /*
         * Start the streaming engine: the Driver runs on its own thread with an
         * internal message loop receiving messages from the application and Executors.
         */
        jsc.start();
        jsc.awaitTermination();
        jsc.close();
    }
}
/**
 * Minimal MySQL access helper: a lazily-created singleton holding a small
 * fixed-size connection pool, with batch-update and callback-based query APIs.
 */
class JDBCWrapper {
    // FIX: volatile is required for safe double-checked locking; without it a
    // thread could observe a partially-constructed instance.
    private static volatile JDBCWrapper jdbcInstance = null;
    // Simple blocking pool of pre-opened connections.
    private static LinkedBlockingQueue<Connection> dbConnectionPool = new LinkedBlockingQueue<Connection>();
    static {
        try {
            Class.forName("com.mysql.jdbc.Driver");
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    /** Returns the process-wide singleton, creating it (and its pool) on first use. */
    public static JDBCWrapper getJDBCInstance() {
        if (jdbcInstance == null) {
            synchronized (JDBCWrapper.class) {
                if (jdbcInstance == null) {
                    jdbcInstance = new JDBCWrapper();
                }
            }
        }
        return jdbcInstance;
    }

    private JDBCWrapper() {
        // NOTE(review): URL and credentials are hard-coded; externalize to configuration.
        for (int i = 0; i < 10; i++) {
            try {
                Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/sparkstreaming", "root", "root");
                dbConnectionPool.put(conn);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Borrows a connection, blocking until one is available.
     * FIX: replaces the sleep/poll busy-wait (which could still race and return
     * null) with a proper blocking take().
     */
    public synchronized Connection getConnection() {
        try {
            return dbConnectionPool.take();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while waiting for a DB connection", e);
        }
    }

    /**
     * Executes sqlText once per Object[] in paramsList as a single JDBC batch
     * inside one transaction; returns the per-statement update counts (or null
     * on failure — errors are logged, not rethrown, matching the original contract).
     */
    public int[] doBatch(String sqlText, List<Object[]> paramsList) {
        Connection conn = getConnection();
        PreparedStatement preparedStatement = null;
        int[] result = null;
        try {
            conn.setAutoCommit(false);
            preparedStatement = conn.prepareStatement(sqlText);
            for (Object[] parameters : paramsList) {
                for (int i = 0; i < parameters.length; i++) {
                    preparedStatement.setObject(i + 1, parameters[i]);
                }
                preparedStatement.addBatch();
            }
            result = preparedStatement.executeBatch();
            conn.commit();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (preparedStatement != null) {
                try {
                    preparedStatement.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }
            if (conn != null) {
                try {
                    // Return the connection to the pool rather than closing it.
                    dbConnectionPool.put(conn);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
        return result;
    }

    /**
     * Executes a parameterized query and hands the open ResultSet to callBack;
     * the ResultSet is closed afterwards. paramsList may be null (no parameters).
     */
    public void doQuery(String sqlText, Object[] paramsList, ExecuteCallBack callBack) {
        System.out.println("sqlText=" + sqlText);
        Connection conn = getConnection();
        PreparedStatement preparedStatement = null;
        ResultSet result = null;
        try {
            preparedStatement = conn.prepareStatement(sqlText);
            // FIX: guard against a null parameter array (callers may pass null).
            if (paramsList != null) {
                for (int i = 0; i < paramsList.length; i++) {
                    preparedStatement.setObject(i + 1, paramsList[i]);
                }
            }
            result = preparedStatement.executeQuery();
            callBack.resultCallBack(result);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // FIX: the ResultSet was never closed.
            if (result != null) {
                try {
                    result.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }
            if (preparedStatement != null) {
                try {
                    preparedStatement.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }
            if (conn != null) {
                try {
                    dbConnectionPool.put(conn);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }
}
/**
 * Callback invoked by {@code JDBCWrapper.doQuery} with the open {@link ResultSet};
 * the caller closes the ResultSet after this method returns.
 */
@FunctionalInterface
interface ExecuteCallBack {
    void resultCallBack(ResultSet result) throws Exception;
}
/**
 * Mutable bean for one user's ad-click record plus its accumulated count.
 * Mirrors the adclicked table columns:
 * timestamp, ip, userID, adID, province, city, clickedCount.
 */
class UserAdClicked {
    private String timestamp; // yyyy-MM-dd
    private String ip;
    private String userID;
    private String adID;
    private String province;
    private String city;
    private Long clickedCount;

    public String getTimestamp() { return timestamp; }

    public void setTimestamp(String timestamp) { this.timestamp = timestamp; }

    public String getIp() { return ip; }

    public void setIp(String ip) { this.ip = ip; }

    public String getUserID() { return userID; }

    public void setUserID(String userID) { this.userID = userID; }

    public String getAdID() { return adID; }

    public void setAdID(String adID) { this.adID = adID; }

    public String getProvince() { return province; }

    public void setProvince(String province) { this.province = province; }

    public String getCity() { return city; }

    public void setCity(String city) { this.city = city; }

    public Long getClickedCount() { return clickedCount; }

    public void setClickedCount(Long clickedCount) { this.clickedCount = clickedCount; }
}
class AdClicked{
private String timestamp;
private String adID;
private String province;
private String city;
private Long clickedCount;
public String getTimestamp() {
return timestamp;
}
public void setTimestamp(String timestamp) {
this.timestamp = timestamp;
}
public String getAdID() {
return adID;
}
public void setAdID(String adID) {
this.adID = adID;
}
public String getProvince() {
return province;
}
public void setProvince(String province) {
this.province = province;
}
public String getCity() {
return city;
}
public void setCity(String city) {
this.city = city;
}
public Long getClickedCount() {
return clickedCount;
}
public void setClickedCount(Long clickedCount) {
this.clickedCount = clickedCount;
}
}
-------------------------------------------------------------MockAdClickedStats ----------------------------------------------------------------------------------------------------------------------------------
import java.util.Date;
import java.util.HashMap;
import java.util.Properties;
import java.util.Random;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class MockAdClickedStats {

    /**
     * Generates random ad-click events and publishes them to the "AdClicked"
     * Kafka topic, one event every 50 ms, from a background thread.
     *
     * Event format (tab-separated): timestamp, ip, userID, adID, province, city
     */
    public static void main(String[] args) {
        final Random random = new Random();
        final String[] provinces = new String[]{"Guangdong", "Zhejiang", "Jiangsu", "Fujian"};
        final HashMap<String, String[]> citiesByProvince = new HashMap<String, String[]>();
        citiesByProvince.put("Guangdong", new String[]{"Guangzhou", "Shenzhen", "DongGuan"});
        citiesByProvince.put("Zhejiang", new String[]{"Hangzhou", "Wenzhou", "Ninbo"});
        citiesByProvince.put("Jiangsu", new String[]{"Nanjing", "Suzhou", "wuxi"});
        citiesByProvince.put("Fujian", new String[]{"Fuzhou", "Ximen", "DongGuan"});
        final String[] ips = new String[]{
                "192.168.112.240",
                "192.168.112.241",
                "192.168.112.242",
                "192.168.112.243",
                "192.168.112.244",
                "192.168.112.245",
                "192.168.112.246",
                "192.168.112.247",
                "192.168.112.248",
                "192.168.112.249",
                "192.168.112.250",
                "192.168.112.251",
                "192.168.112.252",
                "192.168.112.253"
        };
        Properties kafkaConf = new Properties();
        kafkaConf.put("serializer.class", "kafka.serializer.StringEncoder");
        kafkaConf.put("metadata.broker.list", "master:9092,slave1:9092,slave2:9092");
        ProducerConfig producerConfig = new ProducerConfig(kafkaConf);
        final Producer<Integer, String> producer = new Producer<Integer, String>(producerConfig);
        new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    Long timestamp = new Date().getTime();
                    // FIX: bounds derived from the arrays instead of magic numbers
                    // (14 / 4 / 3), so the arrays can change without breaking this.
                    String ip = ips[random.nextInt(ips.length)];
                    int userID = random.nextInt(10000);
                    int adID = random.nextInt(100);
                    String province = provinces[random.nextInt(provinces.length)];
                    String[] cities = citiesByProvince.get(province);
                    String city = cities[random.nextInt(cities.length)];
                    String clickedAd = timestamp + "\t" + ip + "\t" + userID + "\t" + adID + "\t" + province + "\t" + city;
                    // FIX: parameterize the raw KeyedMessage to match the producer's types.
                    producer.send(new KeyedMessage<Integer, String>("AdClicked", clickedAd));
                    System.out.println(clickedAd);
                    try {
                        Thread.sleep(50);
                    } catch (InterruptedException e) {
                        // FIX: restore the interrupt flag and stop producing instead of
                        // swallowing the interruption and spinning on.
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            }
        }).start();
    }
}