From 94e1703b7250117d6b18ef181f77bc58b5740cf9 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Wed, 19 Oct 2011 17:55:08 +0000
Subject: [PATCH] MAPREDUCE-3176. Fixed ant mapreduce tests that are timing
 out because of wrong framework name. Contributed by Hitesh Shah.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1186368 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt             |  3 ++
 .../org/apache/hadoop/mapred/ReduceTask.java     | 12 ++++--
 .../org/apache/hadoop/mapreduce/Cluster.java     | 39 +++++++++++++------
 .../org/apache/hadoop/fs/TestFileSystem.java     |  8 +++-
 .../apache/hadoop/ipc/TestSocketFactory.java     |  2 +
 .../TestClientProtocolProviderImpls.java         | 21 ++++++++++
 6 files changed, 70 insertions(+), 15 deletions(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index f2987ed782e..64badbc7bf4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1683,6 +1683,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-3162. Separated application-init and container-init event types
     in NodeManager's Application state machine. (Todd Lipcon via vinodkv)
 
+    MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because
+    of wrong framework name. (Hitesh Shah via vinodkv)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
index 4017113bb65..969ddf17e31 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
@@ -342,9 +342,15 @@ public class ReduceTask extends Task {
     RawKeyValueIterator rIter = null;
     boolean isLocal = false;
-    // local iff framework == local
-    String framework = job.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
-    isLocal = framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME);
+    // local if
+    // 1) framework == local or
+    // 2) framework == null and job tracker address == local
+    String framework = job.get(MRConfig.FRAMEWORK_NAME);
+    String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
+    if ((framework == null && masterAddr.equals("local"))
+        || (framework != null && framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME))) {
+      isLocal = true;
+    }
 
     if (!isLocal) {
       Class combinerClass = conf.getCombinerClass();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 3e50520292a..94d1fc78f02 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -25,6 +25,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.ServiceLoader;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -62,6 +64,7 @@ public class Cluster {
   private Path sysDir = null;
   private Path stagingAreaDir = null;
   private Path jobHistoryDir = null;
+  private static final Log LOG = LogFactory.getLog(Cluster.class);
 
   static {
     ConfigUtil.loadResources();
@@ -83,17 +86,31 @@ public class Cluster {
     for (ClientProtocolProvider provider : ServiceLoader
         .load(ClientProtocolProvider.class)) {
-      ClientProtocol clientProtocol = null;
-      if (jobTrackAddr == null) {
-        clientProtocol = provider.create(conf);
-      } else {
-        clientProtocol = provider.create(jobTrackAddr, conf);
-      }
-
-      if (clientProtocol != null) {
-        clientProtocolProvider = provider;
-        client = clientProtocol;
-        break;
+      LOG.debug("Trying ClientProtocolProvider : "
+          + provider.getClass().getName());
+      ClientProtocol clientProtocol = null;
+      try {
+        if (jobTrackAddr == null) {
+          clientProtocol = provider.create(conf);
+        } else {
+          clientProtocol = provider.create(jobTrackAddr, conf);
+        }
+
+        if (clientProtocol != null) {
+          clientProtocolProvider = provider;
+          client = clientProtocol;
+          LOG.debug("Picked " + provider.getClass().getName()
+              + " as the ClientProtocolProvider");
+          break;
+        }
+        else {
+          LOG.info("Cannot pick " + provider.getClass().getName()
+              + " as the ClientProtocolProvider - returned null protocol");
+        }
+      }
+      catch (Exception e) {
+        LOG.info("Failed to use " + provider.getClass().getName()
+            + " due to error: " + e.getMessage());
       }
     }
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
index 3f34d40decd..f299cb67401 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java
@@ -94,7 +94,13 @@ public class TestFileSystem extends TestCase {
     CommandFormat cf;
     cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc");
     assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
-    assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo");
+    try {
+      cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1);
+      fail("Expected parsing to fail as it should stop at first non-option");
+    }
+    catch (Exception e) {
+      // Expected
+    }
     cf = new CommandFormat("tail", 1, 1, "f");
     assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
     assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
index 17cb18aa897..7faafae3bb0 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.net.StandardSocketFactory;
 
 /**
@@ -92,6 +93,7 @@ public class TestSocketFactory extends TestCase {
     JobConf jconf = new JobConf(cconf);
     jconf.set("mapred.job.tracker", String.format("localhost:%d",
         jobTrackerPort + 10));
+    jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
     client = new JobClient(jconf);
 
     JobStatus[] jobs = client.jobsToComplete();
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index 89eecb67b8f..f718e1f4998 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -96,4 +96,25 @@ public class TestClientProtocolProviderImpls extends TestCase {
     }
   }
 
+  @Test
+  public void testClusterException() {
+
+    Configuration conf = new Configuration();
+    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
+    conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+
+    // initializing a cluster with this conf should throw an error.
+    // However the exception thrown should not be specific to either
+    // the job tracker client provider or the local provider
+    boolean errorThrown = false;
+    try {
+      Cluster cluster = new Cluster(conf);
+      cluster.close();
+      fail("Not expected - cluster init should have failed");
+    } catch (IOException e) {
+      errorThrown = true;
+      assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
+    }
+    assert(errorThrown);
+  }
 }
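
Note on the ReduceTask hunk: a missing framework name no longer defaults to the
YARN framework; it now falls back to the job tracker address, so a configuration
that only sets the job tracker address to "local" is treated as a local run
again. The standalone sketch below (illustrative only, not part of the patch)
mirrors that decision logic, assuming MRConfig.LOCAL_FRAMEWORK_NAME and the
default master address are both the string "local":

  // Sketch of the local-mode check added to ReduceTask.run().
  // framework  - value of mapreduce.framework.name, possibly null
  // masterAddr - job tracker address, defaulting to "local"
  static boolean isLocalRun(String framework, String masterAddr) {
    if (framework == null) {
      // No framework configured: decide based on the job tracker address.
      return "local".equals(masterAddr);
    }
    // Framework configured explicitly: local only if it names the local runner.
    return "local".equals(framework);
  }

  // For example, isLocalRun(null, "local") and isLocalRun("local", "host:8021")
  // return true, while isLocalRun("yarn", "local") returns false.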
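
Note on the Cluster hunk: provider selection now logs each attempt and tolerates
a provider that throws or returns null, moving on to the next registered provider
instead of aborting. The sketch below shows that generic "first provider that
works" pattern over a ServiceLoader; Provider and Client are hypothetical
stand-ins, not the Hadoop ClientProtocolProvider/ClientProtocol types:

  import java.util.ServiceLoader;

  interface Client {}

  interface Provider {
    // May return null or throw if this provider cannot serve the configuration.
    Client create() throws Exception;
  }

  final class ProviderSelector {
    // Returns the first client a registered provider can create, or null.
    static Client pickClient() {
      for (Provider provider : ServiceLoader.load(Provider.class)) {
        try {
          Client client = provider.create();
          if (client != null) {
            return client;  // picked this provider
          }
          // provider declined (returned null); try the next one
        } catch (Exception e) {
          // provider failed; record the reason and try the next one
          System.err.println("Failed to use " + provider.getClass().getName()
              + " due to error: " + e.getMessage());
        }
      }
      return null;  // caller decides how to report that nothing matched
    }
  }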