
MapReduce: joining a data dictionary table

mapreduce  data dictionary table  hadoop

I was stuck in this pit for a long time. In the end the cause was that the context.write call in the map method was in the wrong place.
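Reduced to a sketch, here is the broken shape next to the fixed one (the broken lines are from my first version of map()):

// broken: keySet().equals(sex) compares a Set<String> with a String, so it
// is always false, and the write sits outside the guard; on a dictionary
// miss sexMap.get(sex) returns null and new Text(null) throws a
// NullPointerException
if (sexMap.keySet().equals(sex)) {
}
context.write(new Text(sexMap.get(sex)), new Text(""));

// fixed: test membership with containsKey and write only on a hit
if (sexMap.containsKey(sex)) {
    context.write(new Text(sexMap.get(sex)), new Text(""));
}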


Here is the source data. The goal is to replace the second column of the data table with the matching second column (the label) of the dictionary table; for example, the code 1 in a data row becomes the label 男.

//data table data.txt (tab-separated)
//one 1 two qqq
//two 2 two ccc


//dictionary table zidian.txt (tab-separated: id, label, code, category)
//1 男 1 sex
//2 女 2 sex
//3 未知 0 sex
//4 結婚 1 marry
//5 未婚 2 marry
//6 未知 0 marry
//(男 = male, 女 = female, 未知 = unknown, 結婚 = married, 未婚 = unmarried)


The desired result is that the sex code in each data row is replaced by its dictionary label: 1 becomes 男 and 2 becomes 女.
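Stripped of Hadoop, the core of the replacement is just a HashMap lookup. A minimal plain-Java sketch using the sample rows above (the class name LookupSketch is mine):

import java.util.HashMap;
import java.util.Map;

public class LookupSketch {
    public static void main(String[] args) {
        // dictionary: code -> label, from the "sex" rows of zidian.txt
        Map<String, String> dict = new HashMap<String, String>();
        dict.put("1", "男");
        dict.put("2", "女");
        // one line of data.txt, split on tabs
        String[] row = "one\t1\ttwo\tqqq".split("\t");
        // replace the code in column two with its label
        row[1] = dict.get(row[1]);
        System.out.println(String.join("\t", row)); // prints: one 男 two qqq
    }
}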


The code:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Cache {

    public static class Mapall extends Mapper<Object, Text, Text, Text> {

        private Map<String, String> sexMap = new HashMap<String, String>();
        private Path[] localFiles;

        // Load the distributed-cache dictionary into memory before any map() call
        @Override
        public void setup(Context context) throws IOException {
            Configuration conf = context.getConfiguration();
            localFiles = DistributedCache.getLocalCacheFiles(conf);
            for (int i = 0; i < localFiles.length; i++) {
                BufferedReader br = new BufferedReader(new FileReader(localFiles[i].toString()));
                String line;
                while ((line = br.readLine()) != null) {
                    String[] fields = line.split("\t");
                    // keep only the "sex" rows; key = code (column 3), value = label (column 2)
                    if (fields.length >= 4 && fields[3].equals("sex")) {
                        sexMap.put(fields[2], fields[1]);
                    }
                }
                br.close();
            }
        }

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // the sex field holds codes such as 1 or 2
            String sex = value.toString().split("\t")[1];
            // if the dictionary has this code, replace it with the label (男/女)
            if (sexMap.containsKey(sex)) {
                // this write belongs right here, inside map() and inside the
                // guard -- misplacing it is what cost me so much time
                context.write(new Text(sexMap.get(sex)), new Text(""));
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // keys arrive grouped, so this emits each label once
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args)
            throws URISyntaxException, IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        DistributedCache.addCacheFile(new URI("hdfs://192.168.20.39:8020/qpf/zidian.txt"), conf);

        Job job = Job.getInstance(conf, "get cache file");
        job.setJarByClass(Cache.class);

        job.setMapperClass(Mapall.class);
        job.setReducerClass(Reduce.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.20.39:8020/qpf/data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.20.39:8020/qpf/data_out"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
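Two closing notes. First, DistributedCache is deprecated in Hadoop 2.x and later; the same wiring goes through the Job and the task context instead. A minimal sketch, assuming Hadoop 2.x, with the link name zidian being my own choice:

// in main(), instead of DistributedCache.addCacheFile(uri, conf):
job.addCacheFile(new URI("hdfs://192.168.20.39:8020/qpf/zidian.txt#zidian"));

// in setup(): the #zidian fragment symlinks the cached file into the task's
// working directory, so it can be opened under that short name
BufferedReader br = new BufferedReader(new FileReader("zidian"));

Second, loading the dictionary into every mapper's memory like this is a map-side join: it works because zidian.txt is tiny, and it avoids shuffling the dictionary entirely. A large dictionary would call for a reduce-side join instead.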


A very simple little example of dictionary-based data replacement.

This article comes from the "白話" blog; please be sure to keep this attribution: http://feature09.blog.51cto.com/12614993/1983555
