diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f2e8f76db11..535f4b8e995 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -466,6 +466,8 @@ Release 2.0.2-alpha - 2012-09-07
HDFS-3907. Allow multiple users for local block readers. (eli)
+ HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
+
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index b20baa9bd2c..c2d5520726a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -273,7 +273,7 @@ public class DFSTestUtil {
* specified target.
*/
public void waitReplication(FileSystem fs, String topdir, short value)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
Path root = new Path(topdir);
/** wait for the replication factor to settle down */
@@ -498,36 +498,48 @@
return fileNames;
}
}
-
- /** wait for the file's replication to be done */
- public static void waitReplication(FileSystem fs, Path fileName,
- short replFactor) throws IOException {
- boolean good;
+
+ /**
+ * Wait for the given file to reach the given replication factor.
+ * @throws TimeoutException if we fail to sufficiently replicate the file
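+ * @throws InterruptedException if interrupted while waiting between checks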
+ */
+ public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
+ throws IOException, InterruptedException, TimeoutException {
+ boolean correctReplFactor;
+ final int ATTEMPTS = 20;
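+ // Each failed check sleeps one second below, so this bounds the wait at ~20s.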
+ int count = 0;
+
do {
- good = true;
+ correctReplFactor = true;
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+ count++;
for (int j = 0; j < locs.length; j++) {
String[] hostnames = locs[j].getNames();
if (hostnames.length != replFactor) {
- String hostNameList = "";
- for (String h : hostnames) hostNameList += h + " ";
- System.out.println("Block " + j + " of file " + fileName
- + " has replication factor " + hostnames.length + "; locations "
- + hostNameList);
- good = false;
- try {
- System.out.println("Waiting for replication factor to drain");
- Thread.sleep(100);
- } catch (InterruptedException e) {}
+ correctReplFactor = false;
+ System.out.println("Block " + j + " of file " + fileName
+ + " has replication factor " + hostnames.length
+ + " (desired " + replFactor + "); locations "
+ + Joiner.on(' ').join(hostnames));
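+ // Replication happens asynchronously; give the cluster a moment before re-checking.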
+ Thread.sleep(1000);
break;
}
}
- if (good) {
+ if (correctReplFactor) {
System.out.println("All blocks of file " + fileName
+ " verified to have replication factor " + replFactor);
}
- } while(!good);
+ } while (!correctReplFactor && count < ATTEMPTS);
+
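+ // The loop above exits on success or once every attempt has failed.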
+ if (!correctReplFactor) {
+ throw new TimeoutException("Timed out waiting for " + fileName +
+ " to reach " + replFactor + " replicas");
+ }
}
/** delete directory and everything underneath it.*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
index ea1b58c7afc..7ccd5b6f52f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
@@ -61,7 +61,7 @@ public class TestBlockReaderLocal {
* of this class might immediately issue a retry on failure, so it's polite.
*/
@Test
- public void testStablePositionAfterCorruptRead() throws IOException {
+ public void testStablePositionAfterCorruptRead() throws Exception {
final short REPL_FACTOR = 1;
final long FILE_LENGTH = 512L;
cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 327dd7c7ef9..51fab6653f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -24,6 +24,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -199,11 +200,12 @@
}
/**
- * create a file with one block and corrupt some/all of the block replicas.
+ * Create a file with one block and corrupt some/all of the block replicas.
*/
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
int corruptBlockCount) throws IOException, AccessControlException,
- FileNotFoundException, UnresolvedLinkException {
+ FileNotFoundException, UnresolvedLinkException, InterruptedException,
+ TimeoutException {
DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
DFSTestUtil.waitReplication(dfs, filePath, repl);
// Locate the file blocks by asking name node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 9500ec2bce9..815e7e19aca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -789,8 +789,7 @@ public class TestDFSClientRetries {
* way. See HDFS-3067.
*/
@Test
- public void testRetryOnChecksumFailure()
- throws UnresolvedLinkException, IOException {
+ public void testRetryOnChecksumFailure() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
index a766263707f..dfc9e9a667a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
@@ -173,7 +173,7 @@ public class TestDatanodeBlockScanner {
}
@Test
- public void testBlockCorruptionPolicy() throws IOException {
+ public void testBlockCorruptionPolicy() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
Random random = new Random();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index 56cb4506c81..28c3e9cb373 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -100,7 +101,7 @@ public class TestFileStatus {
}
private void checkFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
DFSTestUtil.waitReplication(fileSys, name, (short) repl);
}
@@ -129,7 +130,7 @@ public class TestFileStatus {
/** Test the FileStatus obtained calling getFileStatus on a file */
@Test
- public void testGetFileStatusOnFile() throws IOException {
+ public void testGetFileStatusOnFile() throws Exception {
checkFile(fs, file1, 1);
// test getFileStatus on a file
FileStatus status = fs.getFileStatus(file1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index e819e023b14..7ea963d4c7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -27,6 +27,7 @@ import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -420,8 +421,8 @@ public class TestReplication {
}
}
- private void changeBlockLen(MiniDFSCluster cluster,
- int lenDelta) throws IOException, InterruptedException {
+ private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+ throws IOException, InterruptedException, TimeoutException {
final Path fileName = new Path("/file1");
final short REPLICATION_FACTOR = (short)1;
final FileSystem fs = cluster.getFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 965082f4af1..e9e1ddeb9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -88,7 +88,7 @@ public class TestBalancer {
/* create a file with a length of fileLen
*/
static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
short replicationFactor, int nnIndex)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
FileSystem fs = cluster.getFileSystem(nnIndex);
DFSTestUtil.createFile(fs, filePath, fileLen,
replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
* whose used space to be size
*/
private ExtendedBlock[] generateBlocks(Configuration conf, long size,
- short numNodes) throws IOException {
+ short numNodes) throws IOException, InterruptedException, TimeoutException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
try {
cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index dfd0b947c1d..f5848041bcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNameNodes {
/* create a file with a length of fileLen
*/
private static void createFile(Suite s, int index, long len
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = s.cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNameNodes {
* whose used space to be size
*/
private static ExtendedBlock[][] generateBlocks(Suite s, long size
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
for(int n = 0; n < s.clients.length; n++) {
final long fileLen = size/s.replication;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index dc8578e13ec..79785961c91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
* corrupt ones.
*/
@Test
- public void testProcesOverReplicateBlock() throws IOException {
+ public void testProcesOverReplicateBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
* send heartbeats.
*/
@Test
- public void testChooseReplicaToDelete() throws IOException {
+ public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index b9f58baef35..a138071b525 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -89,7 +89,7 @@ public class TestBlockReplacement {
}
@Test
- public void testBlockReplacement() throws IOException, TimeoutException {
+ public void testBlockReplacement() throws Exception {
final Configuration CONF = new HdfsConfiguration();
final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
final String[] NEW_RACKS = {"/RACK2"};
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 83d7a496036..d1c3d9eccb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -27,6 +27,9 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -65,7 +68,7 @@ import org.mockito.invocation.InvocationOnMock;
/**
* This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
* report is happening
*/
public class TestBlockReport {
@@ -316,7 +319,7 @@ public class TestBlockReport {
* @throws IOException in case of an error
*/
@Test
- public void blockReport_06() throws IOException {
+ public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -353,7 +356,7 @@ public class TestBlockReport {
@Test
// Currently this test is failing as expected 'cause the correct behavior is
// not yet implemented (9/15/09)
- public void blockReport_07() throws IOException {
+ public void blockReport_07() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -670,21 +673,25 @@
}
private void startDNandWait(Path filePath, boolean waitReplicas)
- throws IOException {
- if(LOG.isDebugEnabled()) {
+ throws IOException, InterruptedException, TimeoutException {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
}
cluster.startDataNodes(conf, 1, true, null, null);
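+ // Wait for the cluster (including the newly started DN) to report as up.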
+ cluster.waitClusterUp();
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 2);
- if(LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
int lastDn = datanodes.size() - 1;
LOG.debug("New datanode "
+ cluster.getDataNodes().get(lastDn).getDisplayName()
+ " has been started");
}
- if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ if (waitReplicas) {
+ DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ }
}
private ArrayList<Block> prepareForRide(final Path filePath,
@@ -836,8 +842,9 @@ public class TestBlockReport {
public void run() {
try {
startDNandWait(filePath, true);
- } catch (IOException e) {
- LOG.warn("Shouldn't happen", e);
+ } catch (Exception e) {
+ e.printStackTrace();
+ Assert.fail("Failed to start BlockChecker: " + e);
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index ac1cf034338..5a80098329b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
* failure if the configuration parameter allows this.
*/
@Test
- public void testVolumeFailure() throws IOException {
+ public void testVolumeFailure() throws Exception {
FileSystem fs = cluster.getFileSystem();
dataDir = new File(cluster.getDataDirectory());
System.out.println("Data dir: is " + dataDir.getPath());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 9b4e0d44aed..a91baec9156 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
}
// test recovering unlinked tmp replicas
- @Test public void testRecoverReplicas() throws IOException {
+ @Test public void testRecoverReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index af5b76d4c2a..79e893961ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -116,7 +116,7 @@ public class TestFSEditLogLoader {
* automatically bumped up to the new minimum upon restart.
*/
@Test
- public void testReplicationAdjusted() throws IOException {
+ public void testReplicationAdjusted() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
// Replicate and heartbeat fast to shave a few seconds off test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
index a843962e081..d6d849da0ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
@@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks {
* replicas (2) is equal to replication factor (2))
*/
@Test
- public void testWhenDecreasingReplication() throws IOException {
+ public void testWhenDecreasingReplication() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks {
*
*/
@Test
- public void testByAddingAnExtraDataNode() throws IOException {
+ public void testByAddingAnExtraDataNode() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks {
* replicas (1) is equal to replication factor (1))
*/
@Test
- public void testWithReplicationFactorAsOne() throws IOException {
+ public void testWithReplicationFactorAsOne() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks {
* Verify that all replicas are corrupt and 3 replicas are present.
*/
@Test
- public void testWithAllCorruptReplicas() throws IOException {
+ public void testWithAllCorruptReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
index 3c59943159a..6b0406f0287 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.when;
import java.io.DataOutputStream;
import java.io.IOException;
+import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
@@ -95,7 +96,7 @@ public class TestFileInputFormat extends TestCase {
}
private void createInputs(FileSystem fs, Path inDir, String fileName)
- throws IOException {
+ throws IOException, TimeoutException, InterruptedException {
// create a multi-block file on hdfs
Path path = new Path(inDir, fileName);
final short replication = 2;
@@ -157,7 +158,7 @@ public class TestFileInputFormat extends TestCase {
}
}
- public void testMultiLevelInput() throws IOException {
+ public void testMultiLevelInput() throws Exception {
JobConf job = new JobConf(conf);
job.setBoolean("dfs.replication.considerLoad", false);
@@ -291,7 +292,8 @@ public class TestFileInputFormat extends TestCase {
}
static void writeFile(Configuration conf, Path name,
- short replication, int numBlocks) throws IOException {
+ short replication, int numBlocks)
+ throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
FSDataOutputStream stm = fileSys.create(name, true,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
index 6d3fd2927ab..cc9e88a2f09 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
@@ -71,13 +71,13 @@ public class TestMultipleLevelCaching extends TestCase {
return rack.toString();
}
- public void testMultiLevelCaching() throws IOException {
+ public void testMultiLevelCaching() throws Exception {
for (int i = 1 ; i <= MAX_LEVEL; ++i) {
testCachingAtLevel(i);
}
}
- private void testCachingAtLevel(int level) throws IOException {
+ private void testCachingAtLevel(int level) throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
index 63353b6a51e..972391c5e13 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
@@ -31,6 +31,7 @@ import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -449,11 +450,16 @@
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
String mapSignalFile,
String reduceSignalFile, int replication)
- throws IOException {
- writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
- (short)replication);
- writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
- (short)replication);
+ throws IOException, TimeoutException {
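+ // InterruptedException from writeFile is swallowed so callers only see IO/timeout failures.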
+ try {
+ writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
+ (short)replication);
+ writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
+ (short)replication);
+ } catch (InterruptedException ie) {
+ // Ignore
+ }
}
/**
@@ -462,12 +466,16 @@ public class UtilsForTests {
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
boolean isMap, String mapSignalFile,
String reduceSignalFile)
- throws IOException {
- // signal the maps to complete
- writeFile(dfs.getNameNode(), fileSys.getConf(),
- isMap
- ? new Path(mapSignalFile)
- : new Path(reduceSignalFile), (short)1);
+ throws IOException, TimeoutException {
+ try {
+ // signal the maps to complete
+ writeFile(dfs.getNameNode(), fileSys.getConf(),
+ isMap
+ ? new Path(mapSignalFile)
+ : new Path(reduceSignalFile), (short)1);
+ } catch (InterruptedException ie) {
+ // Ignore
+ }
}
static String getSignalFile(Path dir) {
@@ -483,7 +491,8 @@ public class UtilsForTests {
}
static void writeFile(NameNode namenode, Configuration conf, Path name,
- short replication) throws IOException {
+ short replication)
+ throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
SequenceFile.Writer writer =
SequenceFile.createWriter(fileSys, conf, name,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
index 96217871e99..2aaa1efc2ce 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
@@ -23,6 +23,7 @@ import java.net.URI;
import java.util.List;
import java.util.ArrayList;
import java.util.zip.GZIPOutputStream;
+import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
@@ -278,7 +279,7 @@ public class TestCombineFileInputFormat extends TestCase {
assertFalse(rr.nextKeyValue());
}
- public void testSplitPlacement() throws IOException {
+ public void testSplitPlacement() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
try {
@@ -678,7 +679,8 @@ public class TestCombineFileInputFormat extends TestCase {
}
static void writeFile(Configuration conf, Path name,
- short replication, int numBlocks) throws IOException {
+ short replication, int numBlocks)
+ throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
FSDataOutputStream stm = fileSys.create(name, true,
@@ -689,7 +691,8 @@ public class TestCombineFileInputFormat extends TestCase {
// Creates the gzip file and return the FileStatus
static FileStatus writeGzipFile(Configuration conf, Path name,
- short replication, int numBlocks) throws IOException {
+ short replication, int numBlocks)
+ throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
GZIPOutputStream out = new GZIPOutputStream(fileSys.create(name, true, conf
@@ -699,7 +702,8 @@ public class TestCombineFileInputFormat extends TestCase {
}
private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
- OutputStream out, short replication, int numBlocks) throws IOException {
+ OutputStream out, short replication, int numBlocks)
+ throws IOException, TimeoutException, InterruptedException {
for (int i = 0; i < numBlocks; i++) {
out.write(databuf);
}
@@ -707,7 +711,7 @@ public class TestCombineFileInputFormat extends TestCase {
DFSTestUtil.waitReplication(fileSys, name, replication);
}
- public void testSplitPlacementForCompressedFiles() throws IOException {
+ public void testSplitPlacementForCompressedFiles() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
try {
@@ -1058,7 +1062,7 @@ public class TestCombineFileInputFormat extends TestCase {
/**
* Test that CFIF can handle missing blocks.
*/
- public void testMissingBlocks() throws IOException {
+ public void testMissingBlocks() throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
FileSystem fileSys = null;