yuxing posted on 2018-10-31 08:29:32

Hadoop practice 1
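This is the classic WordCount job: the mapper splits each input line on spaces and emits a <word, 1> pair per token, and the reducer sums those counts per word. A usage sketch follows the listing.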

package test2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

  
public class WordCountApp extends Configured implements Tool {

  static String INPUT_PATH = "/user/grid/chenshenglong/dedup_in/file1";
  static String OUT_PATH = "/user/grid/chenshenglong/make_there";
  @Override
  public int run(String[] arg0) throws Exception {
    INPUT_PATH = "/user/grid/chenshenglong/make_five/word1";
    OUT_PATH = "/user/grid/chenshenglong/make_six";
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://192.168.2.100:8020");
    final FileSystem fileSystem = FileSystem.get(conf);
    final Path outPath = new Path(OUT_PATH);
    // delete the output directory if it already exists, otherwise the job fails
    if (fileSystem.exists(outPath)) {
      fileSystem.delete(outPath, true);
    }
    final Job job = new Job(conf, WordCountApp.class.getSimpleName());
    // required when the job is run from a packaged jar
    job.setJarByClass(WordCountApp.class);
    // 1.1 specify where the input files are located
    FileInputFormat.setInputPaths(job, INPUT_PATH);
    // specify how the input is formatted: each line is parsed into a key-value pair
    //job.setInputFormatClass(TextInputFormat.class);
    // 1.2 specify the custom map class
    job.setMapperClass(MyMapper.class);
    // map output types; these can be omitted if they match the reduce output types
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    // 1.3 partitioning
    //job.setPartitionerClass(HashPartitioner.class);
    // run with a single reduce task
    //job.setNumReduceTasks(1);
    // 1.4 TODO sorting and grouping
    // 1.5 TODO combining (local reduction)
    // 2.2 specify the custom reduce class
    job.setReducerClass(MyReducer.class);
    // specify the reduce output types
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    // 2.3 specify where the output is written
    FileOutputFormat.setOutputPath(job, outPath);
    // specify the output format class
    //job.setOutputFormatClass(TextOutputFormat.class);
    // submit the job to the JobTracker and wait for it to finish
    job.waitForCompletion(true);
    return 0;
  }
  public static void main(String[] args) throws Exception {
    // run through ToolRunner so generic Hadoop options on the command line are honored
    ToolRunner.run(new WordCountApp(), args);
  }
  /**
   * KEYIN    i.e. k1: the byte offset of the line within the file
   * VALUEIN  i.e. v1: the text content of the line
   * KEYOUT   i.e. k2: a word appearing in the line
   * VALUEOUT i.e. v2: the count for this occurrence of the word, always 1
   */

  static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable k1, Text v1, Context context) throws java.io.IOException, InterruptedException {
      // split the line into words; the input here is space-delimited
      //final String[] splited = v1.toString().split("\t");
      final String[] splited = v1.toString().split(" ");
      for (String word : splited) {
        // emit <word, 1> for every occurrence
        context.write(new Text(word), new LongWritable(1));
      }
    }
  }
  /**
   * KEYIN    i.e. k2: a word appearing in the input
   * VALUEIN  i.e. v2: the per-occurrence counts emitted for that word
   * KEYOUT   i.e. k3: a distinct word appearing in the text
   * VALUEOUT i.e. v3: the total number of times that word appears in the text
   */

  static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text k2, Iterable<LongWritable> v2s, Context ctx) throws java.io.IOException, InterruptedException {
      // sum the 1s emitted by the mappers for this word
      long times = 0L;
      for (LongWritable count : v2s) {
        times += count.get();
      }
      ctx.write(k2, new LongWritable(times));
    }
  }
  
}
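
A quick sketch of how this might be run, assuming the class above is packaged into a jar named wordcount.jar (the jar name and the sample input are assumptions, not from the original post):

  hadoop jar wordcount.jar test2.WordCountApp

If the input file /user/grid/chenshenglong/make_five/word1 contains, say:

  hello world
  hello hadoop

then the job writes the totals, tab-separated and sorted by key, to /user/grid/chenshenglong/make_six/part-r-00000:

  hadoop  1
  hello   2
  world   1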

