Using MapReduce to perform a join:
In MapReduce we often need to merge two tables into one, which is where a join comes in.
For example, suppose we have the following two tables:
orders.txt:
1001,20170710,p0001,1
1002,20170710,p0001,3
1003,20170710,p0001,3
1004,20170710,p0002,1
products.txt:
p0001,xiaomi,001
p0002,chuizi,001
We join the two tables on the product ID field (p0001, p0002, ...) to produce the final result:
p0001 xiaomi 7
p0002 chuizi 1
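This is a classic reduce-side join: the mapper tags each record with the file it came from and emits the product ID as the key, the shuffle phase groups all records sharing a product ID, and the reducer separates the two kinds of records and combines them. Based on the mapper logic below, the grouped values arriving at each reduce call for the sample data would look like this (the order of values within a group is not guaranteed):
p0001 -> [product#xiaomi, order#1, order#3, order#3]
p0002 -> [product#chuizi, order#1]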
The code is as follows:
package com.lmz;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Mymap {

    public static class joinMapper extends Mapper<LongWritable, Text, Text, Text> {
        private Text outKey = new Text();
        private Text outValue = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            String[] split = line.split(",");
            // Both input files are handled by this one mapper, so use the
            // file name of the current split to tell which kind of record this is.
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            String name = inputSplit.getPath().getName();
            if (name.contains("products")) {
                // Product record: product ID (field 0) as the key,
                // product name (field 1) tagged with "product#" as the value.
                outKey.set(split[0]);
                outValue.set("product#" + split[1]);
                context.write(outKey, outValue);
            } else {
                // Order record: product ID (field 2) as the key,
                // quantity (field 3) tagged with "order#" as the value.
                outKey.set(split[2]);
                outValue.set("order#" + split[3]);
                context.write(outKey, outValue);
            }
        }
    }
    public static class joinReducer extends Reducer<Text, Text, Text, Text> {
        private Text outValue = new Text();

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // Product names seen for this product ID (normally exactly one).
            List<String> productsList = new ArrayList<String>();
            // Quantities from every order record for this product ID.
            List<Integer> ordersList = new ArrayList<Integer>();
            for (Text text : values) {
                String value = text.toString();
                if (value.contains("product#")) {
                    productsList.add(value.split("#")[1]);
                } else if (value.contains("order#")) {
                    ordersList.add(Integer.parseInt(value.split("#")[1].trim()));
                }
            }
            // Sum all order quantities for this product ID once, outside the
            // product loop, so the total is not accumulated repeatedly.
            int total = 0;
            for (int j = 0; j < ordersList.size(); j++) {
                total += ordersList.get(j);
            }
            // Final output per line: product ID, product name, total quantity.
            for (int i = 0; i < productsList.size(); i++) {
                outValue.set(productsList.get(i) + "\t" + total);
                context.write(key, outValue);
            }
        }
    }
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // Uncomment to force the local file system when running without HDFS.
        //conf.set("fs.defaultFS", "file:///");
        // FileOutputFormat refuses to write into an existing directory,
        // so delete the output directory from any previous run first.
        File outDir = new File("E:\\data1\\out");
        if (outDir.exists()) {
            FileUtils.deleteDirectory(outDir);
        }
        Job job = Job.getInstance(conf);
        // Configure the job.
        job.setJarByClass(Mymap.class);                  // class used to locate the jar
        job.setInputFormatClass(TextInputFormat.class);  // input format
        job.setMapperClass(joinMapper.class);
        job.setReducerClass(joinReducer.class);
        // Input and output paths.
        FileInputFormat.setInputPaths(job, new Path("E:\\data1"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\data1\\out"));
        // Output key/value types; the map output types are the same,
        // so no separate setMapOutputKeyClass/setMapOutputValueClass is needed.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Run the job once and print 0 on success, 1 on failure.
        boolean b = job.waitForCompletion(true);
        System.out.println(b ? 0 : 1);
    }
}
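To try it out, place orders.txt and products.txt under E:\data1 and run the main class. (On Windows this assumes a working local Hadoop client, e.g. HADOOP_HOME and winutils.exe set up; the code itself does not configure this.) The job writes its result to E:\data1\out\part-r-00000, which for the sample data should contain (tab-separated):
p0001	xiaomi	7
p0002	chuizi	1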
This code is the simplest possible join. I hope it helps.