MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because of wrong framework name. Contributed by Hitesh Shah.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1186368 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Vinod Kumar Vavilapalli 2011-10-19 17:55:08 +00:00
parent e3bb120e9f
commit 94e1703b72
6 changed files with 70 additions and 15 deletions

View File

@ -1683,6 +1683,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3162. Separated application-init and container-init event types
in NodeManager's Application state machine. (Todd Lipcon via vinodkv)
MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because
of wrong framework name. (Hitesh Shah via vinodkv)
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES

View File

@ -342,9 +342,15 @@ public class ReduceTask extends Task {
RawKeyValueIterator rIter = null; RawKeyValueIterator rIter = null;
boolean isLocal = false; boolean isLocal = false;
// local iff framework == local // local if
String framework = job.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); // 1) framework == local or
isLocal = framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME); // 2) framework == null and job tracker address == local
String framework = job.get(MRConfig.FRAMEWORK_NAME);
String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
if ((framework == null && masterAddr.equals("local"))
|| (framework != null && framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME))) {
isLocal = true;
}
if (!isLocal) { if (!isLocal) {
Class combinerClass = conf.getCombinerClass(); Class combinerClass = conf.getCombinerClass();

View File

@ -25,6 +25,8 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.ServiceLoader; import java.util.ServiceLoader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -62,6 +64,7 @@ public class Cluster {
private Path sysDir = null; private Path sysDir = null;
private Path stagingAreaDir = null; private Path stagingAreaDir = null;
private Path jobHistoryDir = null; private Path jobHistoryDir = null;
private static final Log LOG = LogFactory.getLog(Cluster.class);
static { static {
ConfigUtil.loadResources(); ConfigUtil.loadResources();
@ -83,7 +86,10 @@ public class Cluster {
for (ClientProtocolProvider provider : ServiceLoader for (ClientProtocolProvider provider : ServiceLoader
.load(ClientProtocolProvider.class)) { .load(ClientProtocolProvider.class)) {
LOG.debug("Trying ClientProtocolProvider : "
+ provider.getClass().getName());
ClientProtocol clientProtocol = null; ClientProtocol clientProtocol = null;
try {
if (jobTrackAddr == null) { if (jobTrackAddr == null) {
clientProtocol = provider.create(conf); clientProtocol = provider.create(conf);
} else { } else {
@ -93,8 +99,19 @@ public class Cluster {
if (clientProtocol != null) { if (clientProtocol != null) {
clientProtocolProvider = provider; clientProtocolProvider = provider;
client = clientProtocol; client = clientProtocol;
LOG.debug("Picked " + provider.getClass().getName()
+ " as the ClientProtocolProvider");
break; break;
} }
else {
LOG.info("Cannot pick " + provider.getClass().getName()
+ " as the ClientProtocolProvider - returned null protocol");
}
}
catch (Exception e) {
LOG.info("Failed to use " + provider.getClass().getName()
+ " due to error: " + e.getMessage());
}
} }
if (null == clientProtocolProvider || null == client) { if (null == clientProtocolProvider || null == client) {

View File

@ -94,7 +94,13 @@ public class TestFileSystem extends TestCase {
CommandFormat cf; CommandFormat cf;
cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc"); cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc");
assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-"); assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo"); try {
cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1);
fail("Expected parsing to fail as it should stop at first non-option");
}
catch (Exception e) {
// Expected
}
cf = new CommandFormat("tail", 1, 1, "f"); cf = new CommandFormat("tail", 1, 1, "f");
assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName"); assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName"); assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus; import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.MiniMRCluster; import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.net.StandardSocketFactory; import org.apache.hadoop.net.StandardSocketFactory;
/** /**
@ -92,6 +93,7 @@ public class TestSocketFactory extends TestCase {
JobConf jconf = new JobConf(cconf); JobConf jconf = new JobConf(cconf);
jconf.set("mapred.job.tracker", String.format("localhost:%d", jconf.set("mapred.job.tracker", String.format("localhost:%d",
jobTrackerPort + 10)); jobTrackerPort + 10));
jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
client = new JobClient(jconf); client = new JobClient(jconf);
JobStatus[] jobs = client.jobsToComplete(); JobStatus[] jobs = client.jobsToComplete();

View File

@ -96,4 +96,25 @@ public class TestClientProtocolProviderImpls extends TestCase {
} }
} }
@Test
public void testClusterException() {
Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(JTConfig.JT_IPC_ADDRESS, "local");
// initializing a cluster with this conf should throw an error.
// However the exception thrown should not be specific to either
// the job tracker client provider or the local provider
boolean errorThrown = false;
try {
Cluster cluster = new Cluster(conf);
cluster.close();
fail("Not expected - cluster init should have failed");
} catch (IOException e) {
errorThrown = true;
assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
}
assert(errorThrown);
}
} }