Merge -c 1392075 from trunk to branch-2 to fix MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. Contributed by Arun C. Murthy.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1392076 13f79535-47bb-0310-9956-ffa450edef68
Author: Arun Murthy
Date:   2012-09-30 17:02:27 +00:00
Parent: 10f82d1922
Commit: bfb69f6eea

4 changed files with 8 additions and 6 deletions
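
The pattern in all four files is the same: after HDFS-3910, the HDFS test utilities these MapReduce test helpers call can throw checked exceptions other than IOException, so callers whose signatures were limited to "throws IOException" no longer compile; the fix widens those throws clauses to "throws Exception". Below is a minimal Java sketch of that pattern; the class and method names (DemoHdfsHelper, DemoMapReduceTestUtil, waitForReplication, signalTasks) are illustrative stand-ins, not the actual Hadoop code in this diff.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

// Stand-in for an HDFS-side test utility that, after HDFS-3910, declares
// checked exceptions other than IOException.
class DemoHdfsHelper {
  static void waitForReplication()
      throws IOException, TimeoutException, InterruptedException {
    // ... poll until the file reaches the requested replication ...
  }
}

// Stand-in for a MapReduce test helper like the ones changed below.
class DemoMapReduceTestUtil {
  // Declaring only "throws IOException" here would no longer compile once the
  // helper above can also throw TimeoutException/InterruptedException;
  // widening to "throws Exception" is the fix this commit applies.
  static void signalTasks() throws Exception {
    DemoHdfsHelper.waitForReplication();
  }
}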


@@ -21,6 +21,8 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4674. Hadoop examples secondarysort has a typo
     "secondarysrot" in the usage. (Robert Justice via eli)
 
+    MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. (acmurthy)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES


@@ -98,7 +98,7 @@ public class TestJobQueueInformation extends TestCase {
     dfsCluster.shutdown();
   }
 
-  public void testJobQueues() throws IOException {
+  public void testJobQueues() throws Exception {
     JobClient jc = new JobClient(mrCluster.createJobConf());
     String expectedQueueInfo = "Maximum Tasks Per Job :: 10";
     JobQueueInfo[] queueInfos = jc.getQueues();


@@ -149,7 +149,7 @@ public class TestSetupAndCleanupFailure extends TestCase {
   private void testSetupAndCleanupKill(MiniMRCluster mr,
                                        MiniDFSCluster dfs,
                                        boolean commandLineKill)
-  throws IOException {
+  throws Exception {
     // launch job with waiting setup/cleanup
     RunningJob job = launchJobWithWaitingSetupAndCleanup(mr);
 
@@ -223,7 +223,7 @@ public class TestSetupAndCleanupFailure extends TestCase {
   // Also Tests the command-line kill for setup/cleanup attempts.
   // tests the setup/cleanup attempts getting killed if
   // they were running on a lost tracker
-  public void testWithDFS() throws IOException {
+  public void testWithDFS() throws Exception {
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     FileSystem fileSys = null;


@@ -449,7 +449,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           String mapSignalFile,
                           String reduceSignalFile, int replication)
-      throws IOException {
+      throws Exception {
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
               (short)replication);
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
@@ -462,7 +462,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           boolean isMap, String mapSignalFile,
                           String reduceSignalFile)
-      throws IOException {
+      throws Exception {
     // signal the maps to complete
     writeFile(dfs.getNameNode(), fileSys.getConf(),
               isMap
@@ -483,7 +483,7 @@ public class UtilsForTests {
   }
 
   static void writeFile(NameNode namenode, Configuration conf, Path name,
-                        short replication) throws IOException {
+                        short replication) throws Exception {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer =
       SequenceFile.createWriter(fileSys, conf, name,
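
One reason widening to "throws Exception" is a low-risk fix here: these helpers are only used from JUnit 3 test code, and JUnit invokes test methods reflectively, so a TestCase method can itself declare "throws Exception" and let any failure surface as a test error rather than forcing try/catch wrapping. A hedged usage sketch (hypothetical test class reusing the DemoMapReduceTestUtil stand-in from the sketch above, not code from the Hadoop tree):

import junit.framework.TestCase;

public class DemoSignalTest extends TestCase {
  // JUnit 3 lets a test method declare any checked exception, so callers of
  // the widened helper need no try/catch of their own.
  public void testSignal() throws Exception {
    DemoMapReduceTestUtil.signalTasks();
  }
}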