HBASE-5848 Addendum

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1330105 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
larsh 2012-04-25 05:07:15 +00:00
parent 917a07b73f
commit f4459661d2
2 changed files with 38 additions and 2 deletions

View File

@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -87,6 +88,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.zookeeper.KeeperException;
@@ -1330,8 +1332,11 @@ public class HBaseTestingUtility {
// Allow the user to override FS URI for this map-reduce cluster to use.
mrCluster = new MiniMRCluster(servers,
FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1);
mrCluster.getJobTrackerRunner().getJobTracker().getConf().set("mapred.local.dir",
conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
if (jobConf != null) {
jobConf.set("mapred.local.dir",
conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
}
LOG.info("Mini mapreduce cluster started");
conf.set("mapred.job.tracker",
mrCluster.createJobConf().get("mapred.job.tracker"));

View File

@@ -19,9 +19,12 @@ package org.apache.hadoop.hbase.mapreduce;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
@@ -34,6 +37,7 @@ import org.apache.hadoop.mapreduce.JobID;
*/
abstract public class MapreduceTestingShim {
private static MapreduceTestingShim instance;
private static Class[] emptyParam = new Class[] {};
static {
try {
@@ -48,11 +52,17 @@ abstract public class MapreduceTestingShim {
abstract public JobContext newJobContext(Configuration jobConf)
throws IOException;
abstract public JobConf obtainJobConf(MiniMRCluster cluster);
/**
 * Builds a new {@link JobContext} for the supplied configuration by
 * delegating to whichever shim implementation matched the mapreduce
 * version found on the classpath.
 *
 * @param jobConf the job configuration to wrap
 * @return a version-appropriate {@code JobContext}
 * @throws IOException if the underlying shim fails to construct the context
 */
public static JobContext createJobContext(Configuration jobConf)
    throws IOException {
  JobContext context = instance.newJobContext(jobConf);
  return context;
}
/**
 * Retrieves the {@link JobConf} backing the given mini mapreduce cluster,
 * delegating to the version-specific shim.
 *
 * @param cluster the mini cluster to interrogate (may be {@code null})
 * @return the cluster's {@code JobConf}, or {@code null} when the running
 *         mapreduce version does not expose one
 */
public static JobConf getJobConf(MiniMRCluster cluster) {
  JobConf result = instance.obtainJobConf(cluster);
  return result;
}
private static class MapreduceV1Shim extends MapreduceTestingShim {
public JobContext newJobContext(Configuration jobConf) throws IOException {
@@ -68,6 +78,23 @@ abstract public class MapreduceTestingShim {
"Failed to instantiate new JobContext(jobConf, new JobID())", e);
}
}
/**
 * Extracts the {@link JobConf} from the mini cluster's JobTracker using
 * reflection, because the needed accessors are not declared in every
 * Hadoop version this shim must compile against.
 *
 * @param cluster the mini cluster to probe; {@code null} is tolerated
 * @return the tracker's {@code JobConf}, or {@code null} when the cluster
 *         is absent or the reflective lookup/invocation fails (e.g. the
 *         methods do not exist in the running mapreduce version)
 */
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    // Resolve JobTrackerRunner.getJobTracker() reflectively; it is not
    // part of the compile-time API for all supported Hadoop versions.
    Object trackerRunner = cluster.getJobTrackerRunner();
    Method getJobTracker =
        trackerRunner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object jobTracker = getJobTracker.invoke(trackerRunner, new Object[] {});
    // Same story for JobTracker.getConf().
    Method getConf = jobTracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) getConf.invoke(jobTracker, new Object[] {});
  } catch (NoSuchMethodException nsme) {
    // Method absent in this Hadoop version; caller treats null as "unavailable".
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
};
private static class MapreduceV2Shim extends MapreduceTestingShim {
@@ -83,6 +110,10 @@ abstract public class MapreduceTestingShim {
"Failed to return from Job.getInstance(jobConf)");
}
}
// Mapreduce v2 has no JobTracker-backed JobConf to hand out, so this shim
// always reports "unavailable"; callers (see getJobConf) must null-check.
public JobConf obtainJobConf(MiniMRCluster cluster) {
  return null;
}
};
}