From 267bfc5ff2dad1940f792388913a4b87bc34f5b7 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Thu, 11 Oct 2012 17:22:27 +0000
Subject: [PATCH] Merge -c 1397182 from trunk to branch-2 to fix
MAPREDUCE-4616. Improve javadoc for MultipleOutputs. Contributed by Tony
Burton.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1397183 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../lib/output/LazyOutputFormat.java | 5 +-
.../mapreduce/lib/output/MultipleOutputs.java | 62 +++++++++++++++++++
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 0275b847fa2..9f520ff3a45 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -14,6 +14,9 @@ Release 2.0.3-alpha - Unreleased
MAPREDUCE-3678. The Map tasks logs should have the value of input
split it processed. (harsh)
+ MAPREDUCE-4616. Improve javadoc for MultipleOutputs. (Tony Burton via
+ acmurthy)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
index 2619e207358..c6c49fa6f5a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
@@ -32,7 +32,10 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
- * A Convenience class that creates output lazily.
+ * A convenience class that creates output lazily.
+ * Use in conjunction with org.apache.hadoop.mapreduce.lib.output.MultipleOutputs to recreate the
+ * behaviour of org.apache.hadoop.mapred.lib.MultipleTextOutputFormat (etc) from the old Hadoop API.
+ * See {@link MultipleOutputs} documentation for more information.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
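As a point of reference for the javadoc above, a minimal driver sketch showing how LazyOutputFormat might be wired up together with MultipleOutputs under the new API. The class names MultiOutputDriver and MultiOutputReducer (sketched after the MultipleOutputs diff below), the job name and the argument handling are illustrative assumptions, not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class MultiOutputDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "multiple-outputs-example");
    job.setJarByClass(MultiOutputDriver.class);
    // Mapper configuration omitted; a mapper emitting Text keys and Text values is assumed.
    job.setReducerClass(MultiOutputReducer.class); // hypothetical reducer, sketched below
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    // Register the output format lazily instead of calling job.setOutputFormatClass(...),
    // so no zero-sized part-r-xxxxx files are created for the unused default output.
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}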
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
index 0db94e0475d..7974b78fb89 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.mapreduce.lib.output;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
+import org.apache.hadoop.mapreduce.Reducer.Context;
+import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
@@ -37,6 +40,7 @@ import java.util.*;
* Each additional output, or named output, may be configured with its own
* OutputFormat, with its own key class and with its own value
* class.
+ *
*
*
* Case two: to write data to different files provided by user
@@ -107,6 +111,64 @@ import java.util.*;
*
* }
*
+ *
+ *
+ * When used in conjunction with org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat,
+ * MultipleOutputs can mimic the behaviour of MultipleTextOutputFormat and MultipleSequenceFileOutputFormat
+ * from the old Hadoop API, i.e. output can be written from the Reducer to more than one location.
+ *
+ *
+ *
+ * Use MultipleOutputs.write(KEYOUT key, VALUEOUT value, String baseOutputPath) to write key and
+ * value to a path specified by baseOutputPath, with no need to specify a named output:
+ *
+ *
+ *
+ * private MultipleOutputs<Text, Text> out;
+ *
+ * public void setup(Context context) {
+ * out = new MultipleOutputs<Text, Text>(context);
+ * ...
+ * }
+ *
+ * public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
+ * for (Text t : values) {
+ * out.write(key, t, generateFileName(<parameter list...>));
+ * }
+ * }
+ *
+ * protected void cleanup(Context context) throws IOException, InterruptedException {
+ * out.close();
+ * }
+ *
+ *
+ *
+ * Use your own code in generateFileName() to create a custom path to your results.
+ * '/' characters in baseOutputPath will be translated into directory levels in your file system.
+ * Also, append "part" or a similar prefix to your custom-generated path; otherwise your output files will be named -00000, -00001, etc.
+ * No call to context.write() is necessary. See the example generateFileName() code below.
+ *
+ *
+ *
+ * private String generateFileName(Text k) {
+ * // expect Text k in format "Surname|Forename"
+ * String[] kStr = k.toString().split("\\|");
+ *
+ * String sName = kStr[0];
+ * String fName = kStr[1];
+ *
+ * // example for k = Smith|John
+ * // output written to /user/hadoop/path/to/output/Smith/John-r-00000 (etc)
+ * return sName + "/" + fName;
+ * }
+ *
+ *
+ *
+ * Using MultipleOutputs in this way will still create zero-sized default output files, e.g. part-00000.
+ * To prevent this, use LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
+ * instead of job.setOutputFormatClass(TextOutputFormat.class); in your Hadoop job configuration.
+ *
+ *
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
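To make the reducer fragments in the new javadoc easier to try out, here is a self-contained sketch that combines the setup/reduce/cleanup and generateFileName snippets into one class. The class name MultiOutputReducer and the "Surname|Forename" key format are illustrative assumptions taken from the example, not a definitive implementation.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class MultiOutputReducer extends Reducer<Text, Text, Text, Text> {

  private MultipleOutputs<Text, Text> out;

  @Override
  protected void setup(Context context) {
    out = new MultipleOutputs<Text, Text>(context);
  }

  @Override
  protected void reduce(Text key, Iterable<Text> values, Context context)
      throws IOException, InterruptedException {
    for (Text t : values) {
      // Write each value under a path derived from the key; no named output is needed.
      out.write(key, t, generateFileName(key));
    }
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    out.close();
  }

  // Assumes keys of the form "Surname|Forename", e.g. "Smith|John" becomes "Smith/John",
  // so output lands at <output dir>/Smith/John-r-00000 rather than in the default part files.
  private String generateFileName(Text k) {
    String[] parts = k.toString().split("\\|");
    return parts[0] + "/" + parts[1];
  }
}

Paired with the driver sketch after the LazyOutputFormat diff above, which calls LazyOutputFormat.setOutputFormatClass rather than job.setOutputFormatClass, no empty default part-r-xxxxx files should be left behind.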