
Hadoop Development --- WordCount


Reference: http://hadoop.apache.org/docs/r2.7.6/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html


Create a new Maven project in Eclipse.

pom file contents:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>hadoop_mapreduce</groupId>
  <artifactId>WordCount</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>WordCount</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.8.0</version>
    </dependency>
    <!--
    <dependency>
      <groupId>org.apache.phoenix</groupId>
      <artifactId>phoenix-core</artifactId>
      <version>5.0.0-alpha-HBase-2.0</version>
    </dependency>
    -->
    <!-- https://mvnrepository.com/artifact/org.apache.phoenix/phoenix-core -->
    <dependency>
      <groupId>jdk.tools</groupId>
      <artifactId>jdk.tools</artifactId>
      <version>1.8</version>
      <scope>system</scope>
      <systemPath>C:\Program Files\Java\jdk1.8.0_151\lib\tools.jar</systemPath>
    </dependency>
  </dependencies>
</project>

Note: only the hadoop-client dependency is needed. If you also pull in HBase-related packages, dependency conflicts are very likely and the job will throw exceptions at runtime.
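If an HBase- or Phoenix-related dependency really is required, one option is to exclude the conflicting transitive Hadoop artifacts so the hadoop-client 2.8.0 jars win. A minimal sketch, with an illustrative exclusion; run mvn dependency:tree to find the actual conflicts in your build:

<dependency>
  <groupId>org.apache.phoenix</groupId>
  <artifactId>phoenix-core</artifactId>
  <version>5.0.0-alpha-HBase-2.0</version>
  <exclusions>
    <!-- Illustrative: exclude transitive Hadoop jars that clash with hadoop-client -->
    <exclusion>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
    </exclusion>
  </exclusions>
</dependency>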


WordCount class code:

package hadoop_mapreduce.WordCount;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    // Mapper: tokenizes each input line and emits (word, 1) for every token.
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Reducer (also used as the combiner): sums the counts for each word.
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        /*
         * Scratch code from experimenting with the Writable types:
         *
         * IntWritable intwritable = new IntWritable(1);
         * Text text = new Text("abc");
         * System.out.println(text.toString());
         * System.out.println(text.getLength());
         * System.out.println(intwritable.get());
         * System.out.println(intwritable);
         *
         * StringTokenizer itr = new StringTokenizer("www baidu com");
         * while (itr.hasMoreTokens()) {
         *     System.out.println(itr.nextToken());
         * }
         *
         * Example arguments:
         * hdfs://192.168.50.107:8020/input hdfs://192.168.50.107:8020/output
         */
        //String path = WordCount.class.getResource("/").toString();
        //System.out.println("path = " + path);
        System.out.println("Connection end");
        //System.setProperty("hadoop.home.dir", "file://192.168.50.107/home/hadoop-user/hadoop-2.8.0");
        //String stringInput = "hdfs://192.168.50.107:8020/input/a.txt";
        //String stringOutput = "hdfs://192.168.50.107:8020/output/b.txt";

        Configuration conf = new Configuration();
        // Cluster configuration can be loaded from the classpath when running remotely:
        //conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        //conf.addResource("classpath:core-site.xml");
        //conf.addResource("classpath:hdfs-site.xml");
        //conf.addResource("classpath:mapred-site.xml");
        //conf.set("HADOOP_HOME", "/home/hadoop-user/hadoop-2.8.0");

        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Input and output paths come from the command line.
        //FileInputFormat.addInputPath(job, new Path(stringInput));
        //FileOutputFormat.setOutputPath(job, new Path(stringOutput));
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
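As a quick sanity check of the logic above: for an input file containing the lines "hello world" and "hello hadoop", the job writes one tab-separated word/count pair per line, sorted by key:

hadoop	1
hello	2
world	1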

The locations of the Hadoop connection configuration files are shown in the figure below.

[figure: location of the Hadoop configuration files in the project]


Running the job from Eclipse fails with the error: HADOOP_HOME and hadoop.home.dir are unset.
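This error comes up when the code runs on a machine (typically Windows) without a local Hadoop installation. A common workaround, sketched here with an assumed C:\hadoop path, is to point hadoop.home.dir at a directory whose bin folder contains a winutils.exe matching your Hadoop version, before the Configuration is created:

// Assumed local path; C:\hadoop\bin\winutils.exe must exist and match Hadoop 2.8.0.
System.setProperty("hadoop.home.dir", "C:\\hadoop");
Configuration conf = new Configuration();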


Build and package the project, then copy the jar to the Linux machine:

mvn clean

mvn compile

mvn package
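Maven writes the packaged jar to the project's target directory as WordCount-0.0.1-SNAPSHOT.jar.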

I placed the generated WordCount-0.0.1-SNAPSHOT.jar in the /home/hadoop-user/work directory.

On Linux, run:

hadoop jar WordCount-0.0.1-SNAPSHOT.jar hadoop_mapreduce.WordCount.WordCount hdfs://192.168.50.107:8020/input hdfs://192.168.50.107:8020/output

Note: if the fully qualified class name is omitted here, the job fails because the WordCount class cannot be found. Put the files to be analyzed into the HDFS input directory; do not create the output directory yourself, the job creates it and writes the results into it. The end-to-end steps are sketched below.
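A rough sketch of the full workflow (a.txt stands in for whatever file you want to count; part-r-00000 is the default output file name for a single reducer):

hdfs dfs -mkdir -p /input
hdfs dfs -put a.txt /input
hadoop jar WordCount-0.0.1-SNAPSHOT.jar hadoop_mapreduce.WordCount.WordCount hdfs://192.168.50.107:8020/input hdfs://192.168.50.107:8020/output
hdfs dfs -cat /output/part-r-00000

To make the class name argument optional, the jar manifest can declare a Main-Class. One way, not part of the original build, is the maven-jar-plugin:

<build>
  <plugins>
    <plugin>
      <groupId>org.apache.maven.plugins</groupId>
      <artifactId>maven-jar-plugin</artifactId>
      <configuration>
        <archive>
          <manifest>
            <mainClass>hadoop_mapreduce.WordCount.WordCount</mainClass>
          </manifest>
        </archive>
      </configuration>
    </plugin>
  </plugins>
</build>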


