电影评分案例之高效TopN
例如:我们要求每部电影的最高评分的前n条记录,按照之前的做法在map端是以电影名为key,MovieBean为value,输出到reduce端,然后分组,将每组数组放入到List集合中按分数高低进行排序,取前n条.
此时我们可以考虑在map端时将MovieBean作为key,输出到缓存区中,让缓存区自动按电影名分区并排序,然后分组,在reduce端我们只需要取出前n条记录即可.这样我们可以避免放入List集合中再排一遍序,大大地减少了运算量.
那么当我们以MovieBean为key时,要想系统识别到是以MovieBean中的电影名分区,排序,分组,我们就需要重写这三个方法,并且还需要重写hadoop的序列化和反序列化方法
代码实现:
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Composite key for the TopN job. Emitted as the map-output key so the
 * shuffle buffer sorts records for us: primary order is movie name
 * (descending), secondary order is rating (descending). Implements
 * {@link WritableComparable} so Hadoop can serialize, deserialize and
 * compare instances during the shuffle/sort phase.
 */
public class MovieBeanTopN implements WritableComparable<MovieBeanTopN> {
    private String movie;
    private double rate;
    private String timeStamp;
    private String uid;

    /**
     * Comma-separated form written to the output file; keeps the output
     * compact and easy to split when read by a downstream job.
     */
    @Override
    public String toString() {
        return movie + ", " + rate + ", " + timeStamp + ", " + uid ;
    }

    public String getMovie() {
        return movie;
    }

    public void setMovie(String movie) {
        this.movie = movie;
    }

    public double getRate() {
        return rate;
    }

    public void setRate(double rate) {
        this.rate = rate;
    }

    public String getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(String timeStamp) {
        this.timeStamp = timeStamp;
    }

    public String getUid() {
        return uid;
    }

    public void setUid(String uid) {
        this.uid = uid;
    }

    /**
     * Hadoop serialization. Field order MUST mirror {@link #readFields}.
     *
     * @param dataOutput sink provided by the framework
     * @throws IOException on write failure
     */
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(movie);
        dataOutput.writeDouble(rate);
        dataOutput.writeUTF(timeStamp);
        dataOutput.writeUTF(uid);
    }

    /**
     * Hadoop deserialization; reads fields in the exact order
     * {@link #write} wrote them.
     *
     * @param dataInput source provided by the framework
     * @throws IOException on read failure
     */
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.movie = dataInput.readUTF();
        this.rate = dataInput.readDouble();
        this.timeStamp = dataInput.readUTF();
        this.uid = dataInput.readUTF();
    }

    /**
     * Sort order used during the shuffle: movie name descending; within the
     * same movie, rating descending (highest first), so the reducer can take
     * the first N records of each group without any extra sorting.
     *
     * @param o the other key
     * @return negative/zero/positive per the Comparable contract
     */
    @Override
    public int compareTo(MovieBeanTopN o) {
        // Compare the movie names only once (the original evaluated
        // compareTo twice per call); fall back to rating for ties.
        int byMovie = o.getMovie().compareTo(this.movie);
        return byMovie != 0 ? byMovie : Double.compare(o.getRate(), this.rate);
    }
}
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Custom partitioner: routes every record of the same movie to the same
 * reducer by hashing the movie name. Generic types must match the map
 * output key/value types.
 */
public class MyPartition extends Partitioner<MovieBeanTopN, NullWritable> {

    /**
     * @param key          map-output key; partition is derived from its movie name
     * @param nullWritable unused map-output value
     * @param i            number of reduce tasks
     * @return partition index in [0, i)
     */
    @Override
    public int getPartition(MovieBeanTopN key, NullWritable nullWritable, int i) {
        // Mask off the sign bit (was the magic literal 2147483647) so the
        // modulo result is never negative even for negative hash codes.
        return (key.getMovie().hashCode() & Integer.MAX_VALUE) % i;
    }
}
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
/**
*extends WritableComparato自定义分组规则
*/
public class MyGropingComparapor extends WritableComparator {
/**
* 因为继承的关系,子类会默认调用父类的空参构造,但WritableComparator的空参构造中的参数为 null,所以调用父类对象时需要传入一个对象,避免空指针
*/
public MyGropingComparapor(){
super(MovieBeanTopN.class,true);
}
/**
* 自定义分组规则,比较相邻的两对象的movie值是否相同
* @param a
* @param b
* @return
*/
public int compare(WritableComparable a, WritableComparable b) {
MovieBeanTopN a1 = (MovieBeanTopN) a;
MovieBeanTopN b1 = (MovieBeanTopN) b;
return a1.getMovie().compareTo(b1.getMovie());
}
}
import com.alibaba.fastjson.JSON;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * Driver for the efficient TopN job: uses {@code MovieBeanTopN} itself as
 * the map-output key so the shuffle partitions, sorts and groups records
 * (per the custom Partitioner / comparator / grouping classes above),
 * letting the reducer emit the top records of each group directly.
 */
public class MovieTopN{

    /** How many top-rated records to keep per movie. */
    private static final int TOP_N = 3;

    static class MovieTopNMap extends Mapper<LongWritable, Text,MovieBeanTopN, NullWritable> {
        /**
         * Parses one JSON line into a MovieBeanTopN and emits it as the key
         * (value is NullWritable); the shuffle then partitions/sorts/groups
         * by our custom rules.
         *
         * @param key     byte offset of the line (unused)
         * @param value   one JSON record
         * @param context framework output collector
         * @throws IOException          on write failure
         * @throws InterruptedException if the task is interrupted
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            try {
                String s = value.toString();
                MovieBeanTopN mb = JSON.parseObject(s, MovieBeanTopN.class);
                context.write(mb,NullWritable.get());
            } catch (Exception e) {
                // Best effort: skip malformed lines rather than failing the task.
                e.printStackTrace();
            }
        }
    }

    static class MovieTopNReduce extends Reducer<MovieBeanTopN, NullWritable,MovieBeanTopN, NullWritable>{
        /**
         * Each group holds one movie's records already sorted by rating
         * (descending). Iterating the values advances the key in step
         * (Hadoop reuses the key object), so writing the key inside the
         * loop emits successive records; stop after TOP_N.
         *
         * @param key     current record of the group (mutates as we iterate values)
         * @param values  placeholder values; iterated only to advance the key
         * @param context framework output collector
         * @throws IOException          on write failure
         * @throws InterruptedException if the task is interrupted
         */
        @Override
        protected void reduce(MovieBeanTopN key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
            int count = 0;
            for (NullWritable value : values) {
                count++;
                context.write(key,NullWritable.get());
                if (count == TOP_N){
                    break; // clearer than return: only this group's loop ends
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "cont_top1");
        // Needed when running on a cluster so the framework can locate the jar.
        job.setJarByClass(MovieTopN.class);
        // Mapper / Reducer classes
        job.setMapperClass(MovieTopNMap.class);
        job.setReducerClass(MovieTopNReduce.class);
        // Output key/value types (same for map and reduce here)
        job.setOutputKeyClass(MovieBeanTopN.class);
        job.setOutputValueClass(NullWritable.class);
        // Custom partitioning: same movie -> same reducer
        job.setPartitionerClass(MyPartition.class);
        // Custom grouping: one reduce() call per movie
        job.setGroupingComparatorClass(MyGropingComparapor.class);
        job.setNumReduceTasks(2);
        // Input / output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\txt\\mrdata\\movie\\input"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\txt\\mrdata\\movie\\output8"));
        // Submit, wait, and propagate success/failure as the process exit code
        // (the original ignored the boolean result).
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}