Parameters to configure when submitting a MapReduce job

Submitting to YARN from Linux

On a Linux client the driver picks up fs.defaultFS, mapreduce.framework.name, and the ResourceManager address from the local Hadoop configuration files, so only the job itself has to be wired up in code:

import java.io.IOException;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LinuxToYarn {
	public static void main(String[] args) throws IOException, 
	ClassNotFoundException, InterruptedException, URISyntaxException {
		
		Configuration conf = new Configuration();
		
		Job job = Job.getInstance(conf);
		
		// Locate the job jar via this class
		job.setJarByClass(LinuxToYarn.class); 
		
		// Mapper and Reducer implementation classes this job will use
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types emitted by the Mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the final (Reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Input dataset path and final output path for this job
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and block until it finishes
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}
}
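With the configuration coming from the cluster, the driver is packaged into a jar and launched with the stock hadoop command. A typical invocation (the jar name here is hypothetical, matching the H:/mapreduce.jar used in the Windows example below):

hadoop jar mapreduce.jar LinuxToYarn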

Submitting to YARN from Windows

From Windows the client runs outside the cluster, so the file system, execution framework, ResourceManager host, and HDFS user identity all have to be set in code:

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class submittedWindowToyarn {
	public static void main(String[] args) throws IOException, 
	ClassNotFoundException, InterruptedException, URISyntaxException {
		
		Configuration conf = new Configuration();
		
		// Set a JVM system property in code so the Job object gets the user identity for accessing HDFS
		System.setProperty("HADOOP_USER_NAME", "root");
		
		// Default file system the job will access
		conf.set("fs.defaultFS","hdfs://hadoop1:9000");
		
		// Run the job on YARN and point the client at the ResourceManager host
		conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resourcemanager.hostname", "hadoop2");
		
		// This cross-platform flag is required when the job-submission client runs on Windows
		conf.set("mapreduce.app-submission.cross-platform","true");
		
		Job job = Job.getInstance(conf);
		
		// Explicit path to the job jar on the local Windows file system
		job.setJar("H:/mapreduce.jar");
		
		// Mapper and Reducer implementation classes this job will use
		job.setMapperClass(WordCount.class);
		job.setReducerClass(WordcountMapreduce.class);
		
		// Key/value types emitted by the Mapper
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		// Key/value types of the final (Reducer) output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		// Delete the output directory if it already exists; otherwise the job fails on startup
		Path path = new Path("/output");
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop1:9000"),conf,"root");
		if(fs.exists(path)){
			fs.delete(path,true);
		}
		// Input dataset path and final output path for this job
		FileInputFormat.setInputPaths(job, new Path("/input"));
		FileOutputFormat.setOutputPath(job, new Path("/output"));
		
		// Number of reduce tasks to launch
		job.setNumReduceTasks(2);
		
		// Submit the job and block until it finishes
		boolean flg = job.waitForCompletion(true);
		
		System.exit(flg ? 0 : -1);
		
	}
}
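Both drivers reference a Mapper named WordCount and a Reducer named WordcountMapreduce, but their bodies are not shown above. A minimal word-count sketch of what they might look like, assuming implementations that match the key/value types set on the job:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCount extends Mapper<LongWritable, Text, Text, IntWritable> {
	private static final IntWritable ONE = new IntWritable(1);
	private final Text word = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Emit (word, 1) for every whitespace-separated token in the line
		for (String token : value.toString().split("\\s+")) {
			if (!token.isEmpty()) {
				word.set(token);
				context.write(word, ONE);
			}
		}
	}
}

and the matching Reducer:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordcountMapreduce extends Reducer<Text, IntWritable, Text, IntWritable> {
	@Override
	protected void reduce(Text key, Iterable<IntWritable> values, Context context)
			throws IOException, InterruptedException {
		// Sum the counts emitted for each word
		int sum = 0;
		for (IntWritable v : values) {
			sum += v.get();
		}
		context.write(key, new IntWritable(sum));
	}
}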