[Experience Sharing] Running the WordCount Program on a Hadoop Platform

1. The classic WordCount program (WordCount.java)

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class WordCount extends Configured implements Tool {

  /**
   * A mapper class that tokenizes each input line and emits a (word, 1)
   * pair for every token.
   */
  public static class MapClass extends MapReduceBase implements
      Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      String line = value.toString();
      StringTokenizer itr = new StringTokenizer(line);
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        output.collect(word, one);
      }
    }
  }

  /**
   * A reducer class that just emits the sum of the input values.
   */
  public static class Reduce extends MapReduceBase implements
      Reducer<Text, IntWritable, Text, IntWritable> {

    public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();
      }
      output.collect(key, new IntWritable(sum));
    }
  }

  static int printUsage() {
    System.out.println("wordcount [-m <maps>] [-r <reduces>] <input> <output>");
    ToolRunner.printGenericCommandUsage(System.out);
    return -1;
  }

  /**
   * The main driver for the word count map/reduce program. Invoke this
   * method to submit the map/reduce job.
   *
   * @throws IOException
   *             When there are communication problems with the job tracker.
   */
  public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), WordCount.class);
    conf.setJobName("wordcount");
    // the keys are words (strings)
    conf.setOutputKeyClass(Text.class);
    // the values are counts (ints)
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(MapClass.class);
    // summing is associative and commutative, so the reducer can double as a combiner
    conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);
    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
      try {
        if ("-m".equals(args[i])) {
          conf.setNumMapTasks(Integer.parseInt(args[++i]));
        } else if ("-r".equals(args[i])) {
          conf.setNumReduceTasks(Integer.parseInt(args[++i]));
        } else {
          other_args.add(args[i]);
        }
      } catch (NumberFormatException except) {
        System.out.println("ERROR: Integer expected instead of " + args[i]);
        return printUsage();
      } catch (ArrayIndexOutOfBoundsException except) {
        System.out.println("ERROR: Required parameter missing from "
            + args[i - 1]);
        return printUsage();
      }
    }
    // Make sure there are exactly 2 parameters left.
    if (other_args.size() != 2) {
      System.out.println("ERROR: Wrong number of parameters: "
          + other_args.size() + " instead of 2.");
      return printUsage();
    }
    FileInputFormat.setInputPaths(conf, other_args.get(0));
    FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));
    JobClient.runJob(conf);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new WordCount(), args);
    System.exit(res);
  }
}
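The program above uses the old org.apache.hadoop.mapred API, which matches the hadoop-0.19.1 jar used in the steps below. For comparison, here is a minimal sketch of the same job against the newer org.apache.hadoop.mapreduce API; the class name NewApiWordCount is illustrative, and Job.getInstance assumes Hadoop 2.x or later (on 0.20.x you would construct new Job(conf, "wordcount") instead).

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NewApiWordCount {

  // Tokenizes each line and emits (word, 1) pairs, as in the mapred version.
  public static class TokenizerMapper
      extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private final Text word = new Text();

    @Override
    public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // Sums the counts for each word; also usable as a combiner.
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable v : values) {
        sum += v.get();
      }
      context.write(key, new IntWritable(sum));
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "wordcount");
    job.setJarByClass(NewApiWordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}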




2. Make sure a Hadoop cluster is configured; a single-node setup is fine. Create a directory such as /home/admin/WordCount and compile WordCount.java:
javac -classpath /home/admin/hadoop/hadoop-0.19.1-core.jar WordCount.java -d /home/admin/WordCount
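The exact jar name and path above depend on your Hadoop version and install location. On newer releases, where the core classes are spread across several jars, a more portable variant (assuming the hadoop command is on your PATH; modern releases can print their own classpath via the hadoop classpath subcommand) is:

javac -classpath "$(hadoop classpath)" WordCount.java -d /home/admin/WordCount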


3. After compiling, you will find three class files in the /home/admin/WordCount directory: WordCount.class, WordCount$MapClass.class, and WordCount$Reduce.class (the inner-class files are named after MapClass and Reduce).

cd into /home/admin/WordCount and run:

jar cvf WordCount.jar *.class
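As an optional aside (JDK 6 or later), the jar tool's e flag records the entry point in the manifest, which would let you omit the class name when invoking hadoop jar later:

jar cvfe WordCount.jar WordCount *.class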




This generates the WordCount.jar file.

4. Prepare some input data

The files input1.txt and input2.txt each contain a few words, as follows:

[admin@host WordCount]$ cat input1.txt
Hello, i love china
are you ok?

[admin@host WordCount]$ cat input2.txt
hello, i love word
You are ok




Create a directory on HDFS and put the input files the job needs:

hadoop fs -mkdir /tmp/input
hadoop fs -put input1.txt /tmp/input/
hadoop fs -put input2.txt /tmp/input/

Do not create /tmp/output in advance: the job creates its own output directory and will fail if it already exists (see problem 2 at the end of this post).
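Optionally, confirm the uploads landed where expected:

hadoop fs -ls /tmp/input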




5. Run the program; it prints some information about the job as it runs.


[admin@host WordCount]$ hadoop jar WordCount.jar WordCount /tmp/input /tmp/output
10/09/16 22:49:43 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
10/09/16 22:49:43 INFO mapred.FileInputFormat: Total input paths to process : 2
10/09/16 22:49:43 INFO mapred.JobClient: Running job: job_201008171228_76165
10/09/16 22:49:44 INFO mapred.JobClient:  map 0% reduce 0%
10/09/16 22:49:47 INFO mapred.JobClient:  map 100% reduce 0%
10/09/16 22:49:54 INFO mapred.JobClient:  map 100% reduce 100%
10/09/16 22:49:55 INFO mapred.JobClient: Job complete: job_201008171228_76165
10/09/16 22:49:55 INFO mapred.JobClient: Counters: 16
10/09/16 22:49:55 INFO mapred.JobClient:   File Systems
10/09/16 22:49:55 INFO mapred.JobClient:     HDFS bytes read=62
10/09/16 22:49:55 INFO mapred.JobClient:     HDFS bytes written=73
10/09/16 22:49:55 INFO mapred.JobClient:     Local bytes read=152
10/09/16 22:49:55 INFO mapred.JobClient:     Local bytes written=366
10/09/16 22:49:55 INFO mapred.JobClient:   Job Counters
10/09/16 22:49:55 INFO mapred.JobClient:     Launched reduce tasks=1
10/09/16 22:49:55 INFO mapred.JobClient:     Rack-local map tasks=2
10/09/16 22:49:55 INFO mapred.JobClient:     Launched map tasks=2
10/09/16 22:49:55 INFO mapred.JobClient:   Map-Reduce Framework
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce input groups=11
10/09/16 22:49:55 INFO mapred.JobClient:     Combine output records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Map input records=4
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce output records=11
10/09/16 22:49:55 INFO mapred.JobClient:     Map output bytes=118
10/09/16 22:49:55 INFO mapred.JobClient:     Map input bytes=62
10/09/16 22:49:55 INFO mapred.JobClient:     Combine input records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Map output records=14
10/09/16 22:49:55 INFO mapred.JobClient:     Reduce input records=14






6. View the results



[admin@host WordCount]$ hadoop fs -ls /tmp/output/
Found 2 items
drwxr-x---   - admin admin          0 2010-09-16 22:43 /tmp/output/_logs
-rw-r-----   1 admin admin        102 2010-09-16 22:44 /tmp/output/part-00000

[admin@host WordCount]$ hadoop fs -cat /tmp/output/part-00000
Hello,  1
You     1
are     2
china   1
hello,  1
i       2
love    2
ok      1
ok?     1
word    1
you     1

Problems you may run into:

1. java.io.FileNotFoundException
This exception comes from a bad input path. Re-checking the directories showed the input had been created as /opt/hadoop/tmp/inout rather than the expected /tmp/input.
2. org.apache.hadoop.mapred.FileAlreadyExistsException
This one usually follows from the previous mistake. Because a Hadoop job performs expensive computation, its results are by default never overwritten, so the output directory must not exist before the job runs. Delete it first:
/opt/hadoop/bin/hadoop fs -rmr /tmp/output
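(On Hadoop 2.x and later, -rmr is deprecated; the equivalent command is hadoop fs -rm -r /tmp/output.)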


3. ERROR namenode.NameNode: java.io.IOException: Cannot create directory /usr/local/hadoop-datastore/hadoop-hadoop/dfs/name/current
This happens when the hadoop-datastore directory lacks the required permissions.
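A minimal fix sketch, assuming Hadoop runs as user hadoop in group hadoop (adjust the owner, or the permission mode, to match your installation):

sudo chown -R hadoop:hadoop /usr/local/hadoop-datastore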
