Developing a First Hadoop Program (WordCount) with IDEA + Maven
1. Create a new Maven project and select the JDK version.
2. Set the GroupId and ArtifactId.
3. Set the project name.
4. In Settings, change the Java Compiler version to match your JDK.
5. Copy the hadoop-2.7.2.tar.gz archive from the cluster to the D: drive, extract it, and place hadoop.dll, winutils.exe, and winutils.pdb in its bin directory.
6. Set the HADOOP_HOME environment variable and append %HADOOP_HOME%\bin to PATH; a quick way to verify the setup is sketched below.
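The following is a minimal, optional check (not part of the original steps) that uses only standard Java APIs to confirm HADOOP_HOME is set and winutils.exe is in place; the class name HadoopEnvCheck is just an example.

import java.nio.file.Files;
import java.nio.file.Paths;

public class HadoopEnvCheck {
    public static void main(String[] args) {
        // HADOOP_HOME must point at the extracted Hadoop directory, e.g. D:\hadoop-2.7.2
        String hadoopHome = System.getenv("HADOOP_HOME");
        if (hadoopHome == null) {
            System.err.println("HADOOP_HOME is not set");
            return;
        }
        // winutils.exe is required by Hadoop on Windows and must sit in %HADOOP_HOME%\bin
        boolean hasWinutils = Files.exists(Paths.get(hadoopHome, "bin", "winutils.exe"));
        System.out.println("HADOOP_HOME = " + hadoopHome);
        System.out.println("winutils.exe present: " + hasWinutils);
    }
}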
7. Add the dependencies (pom.xml):
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.kevin.dt</groupId>
    <artifactId>DTWorker</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <!-- Hadoop version -->
        <hadoop.version>2.7.2</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
                <configuration>
                    <excludeTransitive>false</excludeTransitive>
                    <stripVersion>true</stripVersion>
                    <outputDirectory>./lib</outputDirectory>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
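With this plugin configuration, running mvn dependency:copy-dependencies copies the project's dependency jars, with their version numbers stripped, into a lib directory under the project, which is convenient when shipping the job to the cluster.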
8. Add a logging configuration so that detailed execution information and exceptions are visible (put log4j.properties under src/main/resources so it ends up on the classpath):
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
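Log4j 1.x picks this file up from the classpath automatically. If you also want to emit your own messages from the driver, a logger can be created as in this optional snippet (the LOG field name is just an example, not part of the original code):

import org.apache.log4j.Logger;

public class WordCount {
    // Optional: a log4j 1.x logger for messages from the driver itself
    private static final Logger LOG = Logger.getLogger(WordCount.class);

    public static void main(String[] args) throws Exception {
        LOG.info("Submitting the word count job");
        // ... job setup as in step 9 ...
    }
}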
9. WordCount (WordCount.java):
package com.kevin.hadoop;
import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * @author kevin
 * @version 1.0
 * @description A simple WordCount example that counts word occurrences
 * @createDate 2018/12/17
 */
public class WordCount {

    // Mapper: split each input line into tokens and emit (word, 1) for every token
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Reducer (also used as combiner): sum the counts emitted for each word
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();       // load the Hadoop configuration
        Job job = Job.getInstance(conf, "word count");  // create a Job with the configuration
        job.setJarByClass(WordCount.class);             // set the main class
        job.setMapperClass(TokenizerMapper.class);      // set the mapper class
        job.setCombinerClass(IntSumReducer.class);      // set the combiner class
        job.setReducerClass(IntSumReducer.class);       // set the reducer class
        job.setOutputKeyClass(Text.class);              // set the output key type
        job.setOutputValueClass(IntWritable.class);     // set the output value type
        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.171.100:9000/test/input_01/"));   // set the input path
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.171.100:9000/test/output_01")); // set the output path
        System.exit(job.waitForCompletion(true) ? 0 : 1); // wait for completion, then exit
    }
}
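The paths above are hard-coded to one NameNode. As an optional variant (not in the original post), the otherwise unused FileSystem and URI imports suggest a common refinement: read the input and output paths from the command line and delete a pre-existing output directory, since the job fails if it already exists. A minimal sketch of such a main method:

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length < 2) {
            System.err.println("Usage: WordCount <input path> <output path>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        Path output = new Path(args[1]);
        // Delete the output directory if it already exists; otherwise the job fails on startup
        FileSystem fs = FileSystem.get(URI.create(args[1]), conf);
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, output);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

It would be invoked with two arguments, for example hdfs://192.168.171.100:9000/test/input_01/ and hdfs://192.168.171.100:9000/test/output_01.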
10. There are two input files:
file01:
Hello World Bye World
Hello Hadoop Bye Hadoop
Bye Hadoop Hello Hadoop
yes me
good yes
file02:
Hello World Bye World
Hello Hadoop Bye Hadoop
Bye Hadoop Hello Hadoop
yes me
catch ese
11. Result
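For reference, given the two input files above, the part-r-00000 output produced by the job should contain the following tab-separated counts (uppercase words sort first because Text compares raw bytes):

Bye	6
Hadoop	8
Hello	6
World	4
catch	1
ese	1
good	1
me	2
yes	3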