diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b8917b87e4..0772ea6ae41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -607,6 +607,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth)
 
+    HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
+    Arpit Agarwal)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 59d1615025d..ce8a4e75d11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.UnknownHostException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +31,14 @@ import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.UnknownHostException;
+
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * This test checks correctness of port usage by hdfs components:
  * NameNode, DataNode, SecondaryNamenode and BackupNode.
@@ -245,7 +245,7 @@ public class TestHDFSServerPorts {
     return true;
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testNameNodePorts() throws Exception {
     runTestNameNodePorts(false);
     runTestNameNodePorts(true);
@@ -296,7 +296,7 @@ public class TestHDFSServerPorts {
   /**
    * Verify datanode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDataNodePorts() throws Exception {
     NameNode nn = null;
     try {
@@ -332,7 +332,7 @@ public class TestHDFSServerPorts {
   /**
    * Verify secondary namenode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testSecondaryNodePorts() throws Exception {
     NameNode nn = null;
     try {
@@ -361,7 +361,7 @@ public class TestHDFSServerPorts {
   /**
    * Verify BackupNode port usage.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testBackupNodePorts() throws Exception {
     NameNode nn = null;
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 9221653a80b..0cf1fed81e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -17,11 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.BindException;
-import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -33,6 +28,11 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * This class tests the validation of the configuration object when passed
  * to the NameNode
@@ -49,7 +49,7 @@ public class TestValidateConfigurationSettings {
    * an exception
    * is thrown when trying to re-use the same port
    */
-  @Test(expected = BindException.class)
+  @Test(expected = BindException.class, timeout = 300000)
   public void testThatMatchingRPCandHttpPortsThrowException()
       throws IOException {
 
@@ -79,7 +79,7 @@ public class TestValidateConfigurationSettings {
    * Tests setting the rpc port to a different as the web port that an
    * exception is NOT thrown
    */
-  @Test
+  @Test(timeout = 300000)
   public void testThatDifferentRPCandHttpPortsAreOK()
       throws IOException {
 
@@ -117,7 +117,7 @@ public class TestValidateConfigurationSettings {
    * HDFS-3013: NameNode format command doesn't pick up
    * dfs.namenode.name.dir.NameServiceId configuration.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testGenericKeysForNameNodeFormat()
       throws IOException {
     Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index b2cc9197aa8..33b5350222f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -17,27 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.Response;
-
+import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -45,11 +25,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -75,7 +51,20 @@ import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.base.Joiner;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
@@ -128,8 +117,8 @@ public class TestDelegationTokensWithHA {
       cluster.shutdown();
     }
   }
-  
-  @Test
+
+  @Test(timeout = 300000)
   public void testDelegationTokenDFSApi() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -192,7 +181,7 @@ public class TestDelegationTokensWithHA {
    * Test if correct exception (StandbyException or RetriableException) can be
    * thrown during the NN failover.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenDuringNNFailover() throws Exception {
     EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
     // stop the editLogTailer of nn1
@@ -260,7 +249,7 @@ public class TestDelegationTokensWithHA {
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenWithDoAs() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -291,8 +280,8 @@ public class TestDelegationTokensWithHA {
       }
     });
   }
-  
-  @Test
+
+  @Test(timeout = 300000)
   public void testHAUtilClonesDelegationTokens() throws Exception {
     final Token<DelegationTokenIdentifier> token =
         getDelegationToken(fs, "JobTracker");
@@ -354,7 +343,7 @@ public class TestDelegationTokensWithHA {
    * exception if the URI is a logical URI. This bug fails the combination of
    * ha + mapred + security.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDFSGetCanonicalServiceName() throws Exception {
     URI hAUri = HATestUtil.getLogicalUri(cluster);
     String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
@@ -368,8 +357,8 @@ public class TestDelegationTokensWithHA {
     token.renew(dfs.getConf());
     token.cancel(dfs.getConf());
   }
-  
-  @Test
+
+  @Test(timeout = 300000)
   public void testHdfsGetCanonicalServiceName() throws Exception {
     Configuration conf = dfs.getConf();
     URI haUri = HATestUtil.getLogicalUri(cluster);
@@ -390,7 +379,7 @@ public class TestDelegationTokensWithHA {
    * password. (HDFS-6475). With StandbyException, the client can failover to try
    * activeNN.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
     // make nn0 the standby NN, and nn1 the active NN
     cluster.transitionToStandby(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
index cc85c83b3d7..1cd76f48fc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,14 +30,17 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Make sure HA-related metrics are updated and reported appropriately.
  */
 public class TestHAMetrics {
 
   private static final Log LOG = LogFactory.getLog(TestHAMetrics.class);
-  
-  @Test
+
+  @Test(timeout = 300000)
   public void testHAMetrics() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
index e33d8076343..f7474b84e7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
@@ -17,20 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
+import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,13 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -66,7 +47,16 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.junit.Assert.*;
 
 /**
  * Tests state transition from active->standby, and manual failover
@@ -92,7 +82,7 @@ public class TestHAStateTransitions {
    * active and standby mode, making sure it doesn't
    * double-play any edits.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testTransitionActiveToStandby() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -148,7 +138,7 @@ public class TestHAStateTransitions {
    * Test that transitioning a service to the state that it is already
    * in is a nop, specifically, an exception is not thrown.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testTransitionToCurrentStateIsANop() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1L);
@@ -220,7 +210,7 @@
   /**
    * Tests manual failover back and forth between two NameNodes.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testManualFailoverAndFailback() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -346,7 +336,7 @@
   /**
    * Test that delegation tokens continue to work after the failover.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testDelegationTokensAfterFailover() throws IOException {
     Configuration conf = new Configuration();
     conf.setBoolean(
@@ -383,7 +373,7 @@
    * Tests manual failover back and forth between two NameNodes
    * for federation cluster with two namespaces.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testManualFailoverFailbackFederationHA() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -403,12 +393,12 @@
     }
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testFailoverWithEmptyInProgressEditLog() throws Exception {
     testFailoverAfterCrashDuringLogRoll(false);
   }
-  
-  @Test
+
+  @Test(timeout = 300000)
   public void testFailoverWithEmptyInProgressEditLogWithHeader()
       throws Exception {
     testFailoverAfterCrashDuringLogRoll(true);
@@ -570,7 +560,7 @@
    * by virtue of the fact that it wouldn't work properly if the proxies
    * returned were not for the correct NNs.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testIsAtLeastOneActive() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
       .nnTopology(MiniDFSNNTopology.simpleHATopology())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index e9b91249c4c..b00f91647d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -17,23 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.BindException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.net.URI;
-import java.net.URL;
-import java.util.List;
-import java.util.Random;
-
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,14 +31,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.JournalSet;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.*;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
@@ -64,11 +46,19 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.io.Files;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.net.BindException;
+import java.net.URI;
+import java.net.URL;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.*;
 
 public class TestStandbyCheckpoints {
   private static final int NUM_DIRS_IN_LOG = 200000;
@@ -143,7 +133,7 @@ public class TestStandbyCheckpoints {
     }
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testSBNCheckpoints() throws Exception {
     JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);
 
@@ -185,7 +175,7 @@
    * checkpoint for the given txid, but this should not cause
    * an abort, etc.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testBothNodesInStandbyState() throws Exception {
     doEdits(0, 10);
 
@@ -216,7 +206,7 @@
    * same txid, which is a no-op. This test makes sure this doesn't
    * cause any problem.
    */
-  @Test
+  @Test(timeout = 300000)
   public void testCheckpointWhenNoNewTransactionsHappened()
       throws Exception {
     // Checkpoint as fast as we can, in a tight loop.
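
The patch is mechanical: every bare @Test gains a timeout = 300000 argument (five minutes, in milliseconds), so a test that hangs, for example while a MiniDFSCluster waits on a port that never becomes free, fails with a timeout instead of stalling the whole surefire run. For comparison, JUnit 4 can also express the same policy once per class with the org.junit.rules.Timeout rule. The sketch below is illustrative only, not part of this patch: TimeoutRuleExample is a hypothetical class, and the sketch assumes JUnit 4.7 or later, where the rule is available.

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class TimeoutRuleExample {
  // One rule covers every test method in the class, equivalent to
  // annotating each one with @Test(timeout = 300000) as the patch does.
  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  @Test
  public void testSomething() throws Exception {
    // Interrupted and reported as a failure by the rule if it
    // runs past five minutes.
  }
}

HDFS-6862 uses per-method annotations rather than a class-wide rule, which keeps each hunk local to the test it protects and leaves room for different values per test later.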