HDFS-6862. Add missing timeout annotations to tests. (Contributed by Xiaoyu Yao)

Author: arp
Date:   2014-09-05 11:08:03 -07:00
Parent: b051327ab6
Commit: 9609b7303a
7 changed files with 97 additions and 125 deletions
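
The pattern applied throughout is JUnit 4's timeout attribute on @Test: the runner executes the test body on a separate thread and fails the test if it has not finished within the given number of milliseconds, so a hung test can no longer stall the whole suite. Every annotation added in this patch uses 300000 ms (five minutes). A minimal sketch of the idiom — the class and method names below are invented for illustration, not taken from this patch:

    import org.junit.Test;

    public class TimeoutAnnotationExample {
      // JUnit fails (rather than hangs) the test if the body takes
      // longer than 300000 ms = 5 minutes, the value used throughout
      // this patch.
      @Test(timeout = 300000)
      public void testOperationThatMightHang() throws Exception {
        // ... exercise the component under test ...
      }
    }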

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -607,6 +607,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth)
 
+    HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
+    Arpit Agarwal)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java

@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.UnknownHostException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +31,14 @@ import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.UnknownHostException;
+
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * This test checks correctness of port usage by hdfs components:
  * NameNode, DataNode, SecondaryNamenode and BackupNode.
@@ -245,7 +245,7 @@ public class TestHDFSServerPorts {
     return true;
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testNameNodePorts() throws Exception {
     runTestNameNodePorts(false);
     runTestNameNodePorts(true);
@@ -296,7 +296,7 @@
   /**
    * Verify datanode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDataNodePorts() throws Exception {
     NameNode nn = null;
     try {
@@ -332,7 +332,7 @@
   /**
    * Verify secondary namenode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testSecondaryNodePorts() throws Exception {
     NameNode nn = null;
     try {
@@ -361,7 +361,7 @@
   /**
    * Verify BackupNode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testBackupNodePorts() throws Exception {
     NameNode nn = null;
     try {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java

@@ -17,11 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.BindException;
-import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -33,6 +28,11 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * This class tests the validation of the configuration object when passed
  * to the NameNode
@@ -49,7 +49,7 @@ public class TestValidateConfigurationSettings {
    * an exception
    * is thrown when trying to re-use the same port
    */
-  @Test(expected = BindException.class)
+  @Test(expected = BindException.class, timeout = 300000)
   public void testThatMatchingRPCandHttpPortsThrowException()
       throws IOException {
 
@@ -79,7 +79,7 @@
    * Tests setting the rpc port to a different as the web port that an
    * exception is NOT thrown
    */
-  @Test
+  @Test(timeout = 300000)
   public void testThatDifferentRPCandHttpPortsAreOK()
       throws IOException {
 
@@ -117,7 +117,7 @@
    * HDFS-3013: NameNode format command doesn't pick up
    * dfs.namenode.name.dir.NameServiceId configuration.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testGenericKeysForNameNodeFormat()
       throws IOException {
     Configuration conf = new HdfsConfiguration();
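
One hunk above differs from the rest of the patch: testThatMatchingRPCandHttpPortsThrowException already carried an expected attribute, and timeout composes with it, so the test passes only if it throws BindException and does so within the limit; a hang now fails fast instead of blocking the run. A small illustrative sketch (hypothetical test, not taken from this file):

    import java.net.BindException;
    import org.junit.Test;

    public class CombinedAttributesExample {
      // Passes: BindException is thrown well inside the 300000 ms cap.
      // A hanging body would be failed by the timeout even if the
      // exception would eventually have been thrown.
      @Test(expected = BindException.class, timeout = 300000)
      public void testPortAlreadyInUse() throws Exception {
        throw new BindException("Address already in use");
      }
    }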

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java

@@ -17,27 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.Response;
-
+import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -45,11 +25,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -75,7 +51,20 @@ import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.base.Joiner;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
@@ -129,7 +118,7 @@ public class TestDelegationTokensWithHA {
     }
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenDFSApi() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -192,7 +181,7 @@ public class TestDelegationTokensWithHA {
    * Test if correct exception (StandbyException or RetriableException) can be
    * thrown during the NN failover.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenDuringNNFailover() throws Exception {
     EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
     // stop the editLogTailer of nn1
@@ -260,7 +249,7 @@ public class TestDelegationTokensWithHA {
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenWithDoAs() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -292,7 +281,7 @@ public class TestDelegationTokensWithHA {
     });
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testHAUtilClonesDelegationTokens() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -354,7 +343,7 @@ public class TestDelegationTokensWithHA {
    * exception if the URI is a logical URI. This bug fails the combination of
    * ha + mapred + security.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDFSGetCanonicalServiceName() throws Exception {
     URI hAUri = HATestUtil.getLogicalUri(cluster);
     String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
@@ -369,7 +358,7 @@ public class TestDelegationTokensWithHA {
     token.cancel(dfs.getConf());
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testHdfsGetCanonicalServiceName() throws Exception {
     Configuration conf = dfs.getConf();
     URI haUri = HATestUtil.getLogicalUri(cluster);
@@ -390,7 +379,7 @@ public class TestDelegationTokensWithHA {
    * password. (HDFS-6475). With StandbyException, the client can failover to try
    * activeNN.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
     // make nn0 the standby NN, and nn1 the active NN
     cluster.transitionToStandby(0);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +30,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Make sure HA-related metrics are updated and reported appropriately.
  */
@@ -40,7 +40,7 @@ public class TestHAMetrics {
 
   private static final Log LOG = LogFactory.getLog(TestHAMetrics.class);
 
-  @Test
+  @Test(timeout = 300000)
   public void testHAMetrics() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java

@@ -17,20 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
+import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,13 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -66,7 +47,16 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.junit.Assert.*;
 
 /**
  * Tests state transition from active->standby, and manual failover
@@ -92,7 +82,7 @@ public class TestHAStateTransitions {
    * active and standby mode, making sure it doesn't
    * double-play any edits.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testTransitionActiveToStandby() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -148,7 +138,7 @@ public class TestHAStateTransitions {
    * Test that transitioning a service to the state that it is already
    * in is a nop, specifically, an exception is not thrown.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testTransitionToCurrentStateIsANop() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1L);
@@ -220,7 +210,7 @@ public class TestHAStateTransitions {
   /**
    * Tests manual failover back and forth between two NameNodes.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testManualFailoverAndFailback() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -346,7 +336,7 @@ public class TestHAStateTransitions {
   /**
    * Test that delegation tokens continue to work after the failover.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokensAfterFailover() throws IOException {
     Configuration conf = new Configuration();
     conf.setBoolean(
@@ -383,7 +373,7 @@ public class TestHAStateTransitions {
    * Tests manual failover back and forth between two NameNodes
    * for federation cluster with two namespaces.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testManualFailoverFailbackFederationHA() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -403,12 +393,12 @@ public class TestHAStateTransitions {
     }
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testFailoverWithEmptyInProgressEditLog() throws Exception {
     testFailoverAfterCrashDuringLogRoll(false);
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testFailoverWithEmptyInProgressEditLogWithHeader()
       throws Exception {
     testFailoverAfterCrashDuringLogRoll(true);
@@ -570,7 +560,7 @@ public class TestHAStateTransitions {
    * by virtue of the fact that it wouldn't work properly if the proxies
    * returned were not for the correct NNs.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testIsAtLeastOneActive() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
         .nnTopology(MiniDFSNNTopology.simpleHATopology())

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java

@@ -17,23 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.BindException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.net.URI;
-import java.net.URL;
-import java.util.List;
-import java.util.Random;
-
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,14 +31,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.JournalSet;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
@@ -64,11 +46,19 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.io.Files;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.net.BindException;
+import java.net.URI;
+import java.net.URL;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.*;
 
 public class TestStandbyCheckpoints {
   private static final int NUM_DIRS_IN_LOG = 200000;
@@ -143,7 +133,7 @@ public class TestStandbyCheckpoints {
     }
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testSBNCheckpoints() throws Exception {
     JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);
 
@@ -185,7 +175,7 @@ public class TestStandbyCheckpoints {
    * checkpoint for the given txid, but this should not cause
    * an abort, etc.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testBothNodesInStandbyState() throws Exception {
     doEdits(0, 10);
 
@@ -216,7 +206,7 @@ public class TestStandbyCheckpoints {
    * same txid, which is a no-op. This test makes sure this doesn't
    * cause any problem.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testCheckpointWhenNoNewTransactionsHappened()
       throws Exception {
     // Checkpoint as fast as we can, in a tight loop.
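
A note on the design choice: the patch annotates each test method individually rather than applying one class-wide limit. JUnit 4 also provides the org.junit.rules.Timeout rule for the class-wide variant; a sketch of that alternative (not what this commit does, and assuming the JUnit 4.x-era constructor that takes milliseconds):

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    public class ClassWideTimeoutExample {
      // One rule covers every @Test method in the class.
      @Rule
      public Timeout globalTimeout = new Timeout(300000);

      @Test
      public void testCoveredByTheRule() throws Exception {
        // ... test body ...
      }
    }

Per-method annotations keep the limit visible at each test and allow individual overrides, which is presumably why the patch takes that form.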