# Problem Requirements
1. Data cleaning: normalize the format of every word (drop all words that do not begin with an English letter).
2. Words that occur fewer than 3 times are excluded from the results.
3. In the results, a word's frequency is represented by a corresponding run of "+" characters (see the sample below).
4. Define a custom Partitioner class with a getPartition() method that sends words beginning with an uppercase letter to one reducer and words beginning with a lowercase letter to another.
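As a concrete illustration of requirements 2 and 3, assume a hypothetical input in which "hadoop" occurs 4 times, "Spark" occurs 3 times, and "map" occurs twice. Then "map" is dropped (fewer than 3 occurrences) and the expected output lines would be:

```
Spark	+++
hadoop	++++
```

Here "hadoop" is written by the lowercase-letter reducer and "Spark" by the uppercase-letter one, so the two lines actually land in different output files.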
# Solution Approach
Use Character.isLowerCase(s.charAt(0)) and Character.isUpperCase(s.charAt(0)) to test whether a token begins with a lowercase or an uppercase letter, thereby keeping only the words that start with a letter. (Note that these methods accept any cased Unicode letter, not just A–Z; for plain ASCII input the effect is the same as checking for English letters.)

Use an if statement to check whether a word's count is at least 3; only such words are emitted.

In the original code, the IntSumReducer class accumulates each word's count in sum. The only change needed there is to create a StringBuilder object s and use a for loop with append() to convert the numeric count sum into a run of '+' characters, then emit <key, s>. The reducer's output value type is now Text, so job.setCombinerClass(IntSumReducer.class) in the main function must be commented out: a combiner runs between map and reduce and would feed Text values into a shuffle stage that still expects IntWritable.

The partitioner tests Character.isLowerCase(s.charAt(0)) to decide whether a key starts with a lowercase letter, returning 0 if it does and 1 otherwise.
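As a quick sanity check of the two pieces of logic above, here is a minimal standalone sketch (plain Java, no Hadoop; the class and method names LogicDemo, plusRun, and partitionFor, and the sample words, are made up for illustration). It converts a count into a run of '+' and computes the partition number for a key:

```java
public class LogicDemo {
    // Mirrors the reducer's conversion: a count becomes a run of '+'.
    static String plusRun(int sum) {
        StringBuilder s = new StringBuilder();
        for (int i = 0; i < sum; i++) {
            s.append("+");
        }
        return s.toString();
    }

    // Mirrors getPartition(): 0 for lowercase-initial keys, 1 otherwise.
    static int partitionFor(String key) {
        return Character.isLowerCase(key.charAt(0)) ? 0 : 1;
    }

    public static void main(String[] args) {
        System.out.println(plusRun(4));             // ++++
        System.out.println(partitionFor("hadoop")); // 0 (lowercase reducer)
        System.out.println(partitionFor("Spark"));  // 1 (uppercase reducer)
    }
}
```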
## Code
```java
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        String s = itr.nextToken();
        // Keep only tokens whose first character is a letter.
        if (Character.isLowerCase(s.charAt(0)) || Character.isUpperCase(s.charAt(0))) {
          word.set(s);
          context.write(word, one);
        }
      }
    }
  }

  // Must be static so Hadoop can instantiate it by reflection.
  public static class myPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numPartitions) {
      String s = key.toString();
      // Lowercase-initial words go to reducer 0, everything else to reducer 1.
      if (Character.isLowerCase(s.charAt(0)))
        return 0;
      else
        return 1;
    }
  }

  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, Text> {

    private Text result = new Text();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      // Emit only words seen at least 3 times, rendered as a run of '+'.
      if (sum >= 3) {
        StringBuilder s = new StringBuilder();
        for (int i = 0; i < sum; i++) {
          s.append("+");
        }
        result.set(s.toString());
        context.write(key, result);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    // The combiner must stay disabled: IntSumReducer now emits Text values,
    // which the shuffle (expecting IntWritable) could not re-aggregate.
    //job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setPartitionerClass(myPartitioner.class);
    job.setNumReduceTasks(2);                       // two reducers, one per partition
    job.setMapOutputKeyClass(Text.class);           // map output types differ from job output
    job.setMapOutputValueClass(IntWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);            // reducer emits Text, not IntWritable
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
```
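With two reduce tasks, a successful run leaves two files in the output directory, part-r-00000 (lowercase-initial words) and part-r-00001 (all other words), following Hadoop's default output naming. The job can be launched in the usual way, for example `hadoop jar wordcount.jar WordCount <input> <output>` (the jar name here is illustrative).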