hadoop mapreduce开发实践之输出数据压缩
#!/bin/bash
# Submit a Hadoop Streaming wordcount job with gzip-compressed intermediate
# (map) output and gzip-compressed final job output.
#
# The whitelist archive wordwhite.tar.gz is shipped to every node via
# -cacheArchive and unpacked under the symlink WHF.gz, which mapper.py
# receives as its first argument.
#
# NOTE(review): the original file fused the shebang and the two assignments
# below onto a single line, and omitted the space after four of the
# -jobconf flags (e.g. -jobconf"mapred.output.compress=true"), which the
# streaming jar rejects as unknown options. Both defects are fixed here.

HADOOP_CMD="/home/hadoop/app/hadoop/hadoop-2.6.0-cdh5.13.0/bin/hadoop"
STREAM_JAR_PATH="/home/hadoop/app/hadoop/hadoop-2.6.0-cdh5.13.0/share/hadoop/tools/lib/hadoop-streaming-2.6.0-cdh5.13.0.jar"

INPUT_FILE_PATH="/input/The_Man_of_Property"
OUTPUT_FILE_PATH="/output/wordcount/CacheArchiveCompressFile"

# Remove any previous output directory; the job fails if it already exists.
# (-rm -r replaces the deprecated -rmr; -skipTrash deletes immediately.)
"$HADOOP_CMD" fs -rm -r -skipTrash "$OUTPUT_FILE_PATH"

"$HADOOP_CMD" jar "$STREAM_JAR_PATH" \
  -input "$INPUT_FILE_PATH" \
  -output "$OUTPUT_FILE_PATH" \
  -jobconf "mapred.job.name=wordcount_wordwhite_cacheArchivefile_demo" \
  -jobconf "mapred.compress.map.output=true" \
  -jobconf "mapred.map.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
  -jobconf "mapred.output.compress=true" \
  -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
  -mapper "python mapper.py WHF.gz" \
  -reducer "python reducer.py" \
  -cacheArchive "hdfs://localhost:9000/input/cachefile/wordwhite.tar.gz#WHF.gz" \
  -file "./mapper.py" \
  -file "./reducer.py"
页:
[1]