Content-Based Recommendation Algorithm Implementation
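The four MapReduce jobs below form a single pipeline. Judging from the parsing logic in Job1 and Job3 (which re-reads /moviecf/input/data1.txt), the input is assumed to be two tab-separated files under /moviecf/input: a user rating file whose lines start with "u" (userID, then movieID_rating fields) and the movie feature model file data1.txt, whose lines start with "m" (movieID, then tagID_weight fields). A hypothetical sample, inferred from the code rather than stated by it:

u1	m1_4.0	m2_5.0
m1	t1_1.0	t2_0.0
m2	t1_0.0	t2_1.0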
package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Works together with Job2 to compute:
 * user-movie rating matrix x movie feature model matrix
 *   = the user's preference weight for each feature
 * (a matrix multiplication).
 */
public class Job1 {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            if (line.startsWith("u")) {
                // User rating line: userID \t movieID_rating \t ...
                // Re-key each rating by movie ID so it meets the movie's
                // feature weights in the reducer.
                String[] split = line.split("\t");
                String userID = split[0];
                for (int i = 1; i < split.length; i++) {
                    String mvID = split[i].substring(0, split[i].indexOf("_"));
                    String val = split[i].substring(split[i].indexOf("_") + 1);
                    context.write(new Text(mvID), new Text(userID + "_" + val));
                }
            } else if (line.startsWith("m")) {
                // Movie feature line: movieID \t tagID_weight \t ...
                String[] split = line.split("\t");
                for (int i = 1; i < split.length; i++) {
                    context.write(new Text(split[0]), new Text(split[i]));
                }
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Separate this movie's user ratings from its tag weights.
            List<String> userVals = new ArrayList<>();
            List<String> tagVals = new ArrayList<>();
            for (Text i : values) {
                if (i.toString().startsWith("u")) {
                    userVals.add(i.toString());
                } else {
                    tagVals.add(i.toString());
                }
            }
            // Emit one partial product per (user, tag) pair:
            // key = userID_tagID, value = rating * tagWeight.
            for (String i : userVals) {
                String userID = i.substring(0, i.indexOf("_"));
                double userVal = Double.parseDouble(i.substring(i.indexOf("_") + 1));
                for (String j : tagVals) {
                    String tagID = j.substring(0, j.indexOf("_"));
                    double tagVal = Double.parseDouble(j.substring(j.indexOf("_") + 1));
                    context.write(new Text(userID + "_" + tagID),
                            new Text(String.valueOf(userVal * tagVal)));
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job1.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path("/moviecf/input"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output1"));
        job.waitForCompletion(true);
    }
}
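On the hypothetical sample above, Job1's reducer pairs u1's rating of each movie with that movie's tag weights and emits one partial product per (user, tag) pair, roughly (emission order may vary):

u1_t1	4.0
u1_t2	0.0
u1_t1	0.0
u1_t2	5.0

(the first two lines come from m1, the last two from m2).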
package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Works together with Job1.
 * Goal:
 * user-movie rating matrix x movie feature model matrix
 *   = the user's preference weight for each feature
 */
public class Job2 {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input line from Job1: userID_tagID \t partialProduct.
            // Re-key by user ID so all of a user's partial products
            // meet in the same reducer call.
            String line = value.toString();
            String[] split = line.split("\t");
            String userID = split[0].substring(0, split[0].indexOf("_"));
            String tagID = split[0].substring(split[0].indexOf("_") + 1);
            context.write(new Text(userID), new Text(tagID + "_" + split[1]));
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Sum the partial products per tag to finish the matrix product.
            HashMap<String, Double> map = new HashMap<>();
            List<String> tagIDs = new ArrayList<>();
            for (Text i : values) {
                String tagID = i.toString().substring(0, i.toString().indexOf("_"));
                double val = Double.parseDouble(i.toString().substring(i.toString().indexOf("_") + 1));
                if (!map.containsKey(tagID)) {
                    map.put(tagID, val);
                    tagIDs.add(tagID);
                } else {
                    map.put(tagID, val + map.get(tagID));
                }
            }
            // Emit one line per user: userID \t tagID_weight \t ...
            Collections.sort(tagIDs);
            StringBuilder line = new StringBuilder();
            for (String i : tagIDs) {
                line.append(i).append("_").append(map.get(i)).append("\t");
            }
            context.write(key, new Text(line.toString()));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job2.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output1"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output2"));
        job.waitForCompletion(true);
    }
}
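As a quick local sanity check of what Job1 and Job2 jointly compute, here is a minimal plain-Java sketch (no Hadoop; the class name and toy data are hypothetical, matching the sample above) of the matrix product preference[user][tag] = sum over movies of rating[user][movie] * feature[movie][tag]:

import java.util.HashMap;
import java.util.Map;

// Hypothetical helper, not part of the pipeline: verifies the Job1+Job2
// matrix product on in-memory toy data.
public class PreferenceSketch {
    public static void main(String[] args) {
        // rating[user][movie]: the user rating matrix (toy data)
        Map<String, Map<String, Double>> rating = new HashMap<>();
        rating.put("u1", Map.of("m1", 4.0, "m2", 5.0));
        // feature[movie][tag]: the movie feature model matrix (toy data)
        Map<String, Map<String, Double>> feature = new HashMap<>();
        feature.put("m1", Map.of("t1", 1.0, "t2", 0.0));
        feature.put("m2", Map.of("t1", 0.0, "t2", 1.0));

        // preference[user][tag] accumulates the partial products,
        // just as Job2's reducer sums Job1's output.
        Map<String, Map<String, Double>> preference = new HashMap<>();
        rating.forEach((user, movies) -> movies.forEach((movie, r) ->
            feature.getOrDefault(movie, Map.of()).forEach((tag, w) ->
                preference.computeIfAbsent(user, k -> new HashMap<>())
                          .merge(tag, r * w, Double::sum))));

        System.out.println(preference); // e.g. {u1={t1=4.0, t2=5.0}}
    }
}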
package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Similarity. Works together with Job4:
 * from the user feature preference matrix and the movie feature model matrix,
 * compute the cosine similarity between each user and each movie,
 * i.e. how much each user is predicted to like each movie.
 */
public class Job3 {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input is either a user preference line (Job2's output) or a
            // movie feature line (data1.txt); both look like:
            // id \t tagID_value \t ...
            String line = value.toString();
            String[] split = line.split("\t");
            String id = split[0];

            // Squared norm of the vector; used as the denominator in Job4.
            double a = 0;
            for (int i = 1; i < split.length; i++) {
                double val = Double.parseDouble(split[i].substring(split[i].indexOf("_") + 1));
                a += val * val;
            }

            // Re-key each component by tag ID, carrying the squared norm along.
            for (int i = 1; i < split.length; i++) {
                String tagID = split[i].substring(0, split[i].indexOf("_"));
                double val = Double.parseDouble(split[i].substring(split[i].indexOf("_") + 1));
                context.write(new Text(tagID), new Text(id + "_" + val + "_" + a));
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // For this tag, separate user components from movie components.
            List<String> list_user = new ArrayList<>();
            List<String> list_movie = new ArrayList<>();
            for (Text i : values) {
                if (i.toString().startsWith("u")) {
                    list_user.add(i.toString());
                } else {
                    list_movie.add(i.toString());
                }
            }
            // Emit one partial dot product per (user, movie) pair,
            // together with both squared norms.
            for (String i : list_user) {
                String[] userSplit = i.split("_");
                String userID = userSplit[0];
                double user_val = Double.parseDouble(userSplit[1]);
                String user_a = userSplit[2];
                for (String j : list_movie) {
                    String[] mvSplit = j.split("_");
                    String mvID = mvSplit[0];
                    double mv_val = Double.parseDouble(mvSplit[1]);
                    String mv_a = mvSplit[2];
                    context.write(new Text(userID + "_" + mvID),
                            new Text((mv_val * user_val) + "_" + user_a + "_" + mv_a));
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job3.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output2"));
        FileInputFormat.addInputPath(job, new Path("/moviecf/input/data1.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output3"));
        job.waitForCompletion(true);
    }
}
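Continuing the toy sample: u1's preference vector is (t1 = 4.0, t2 = 5.0), so its squared norm is 4.0^2 + 5.0^2 = 41.0, while m1's feature vector (t1 = 1.0, t2 = 0.0) has squared norm 1.0. Job3 therefore emits, for the pair (u1, m1), one partial dot product per tag, each carrying both squared norms so Job4 can form the denominator:

u1_m1	4.0_41.0_1.0
u1_m1	0.0_41.0_1.0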
package com.oracle.moviecf;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Works together with Job3:
 * from the user feature preference matrix and the movie feature model matrix,
 * compute the cosine similarity between each user and each movie,
 * i.e. how much each user is predicted to like each movie.
 */
public class Job4 {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input line from Job3: userID_movieID \t dotPart_userNorm2_movieNorm2
            String line = value.toString();
            String[] split = line.split("\t");
            String userID = split[0].substring(0, split[0].indexOf("_"));
            String mvID = split[0].substring(split[0].indexOf("_") + 1);
            context.write(new Text(userID), new Text(mvID + "\t" + split[1]));
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // map_a: denominator sqrt(|user|^2 * |movie|^2) per movie;
            // map_b: accumulated dot product per movie.
            HashMap<String, Double> map_a = new HashMap<>();
            HashMap<String, Double> map_b = new HashMap<>();
            List<String> keyList = new ArrayList<>();
            for (Text i : values) {
                String[] split = i.toString().split("\t");
                String[] stemp = split[1].split("_");
                double a = Math.sqrt(Double.parseDouble(stemp[1]) * Double.parseDouble(stemp[2]));
                double b = Double.parseDouble(stemp[0]);
                if (!map_a.containsKey(split[0])) {
                    map_a.put(split[0], a);
                    map_b.put(split[0], b);
                    keyList.add(split[0]);
                } else {
                    map_b.put(split[0], b + map_b.get(split[0]));
                }
            }
            // Emit one line per user: userID \t movieID_cosine \t ...
            Collections.sort(keyList);
            StringBuilder result = new StringBuilder();
            for (String i : keyList) {
                double val = map_b.get(i) / map_a.get(i);
                result.append(i).append("_").append(String.format("%.1f", val)).append("\t");
            }
            context.write(key, new Text(result.toString()));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Job4.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path("/moviecf/output3"));
        FileOutputFormat.setOutputPath(job, new Path("/moviecf/output4"));
        job.waitForCompletion(true);
    }
}
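Job4 then sums the partial dot products per (user, movie) key and divides by sqrt(userNorm2 * movieNorm2), which is exactly the cosine similarity. A minimal plain-Java check on the toy numbers above (hypothetical class name, not part of the pipeline):

// Hypothetical helper: replays Job4's final formula on the toy numbers.
public class CosineSketch {
    public static void main(String[] args) {
        double dot = 4.0 + 0.0;   // summed partial products from Job3
        double normU2 = 41.0;     // |u1|^2 = 4.0^2 + 5.0^2
        double normM2 = 1.0;      // |m1|^2 = 1.0^2 + 0.0^2
        double cos = dot / Math.sqrt(normU2 * normM2);
        System.out.printf("%.1f%n", cos); // 0.6, matching Job4's "%.1f" formatting
    }
}

For this pair, the final line in /moviecf/output4 would therefore read roughly: u1	m1_0.6.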