diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9447a3ee13..d546c4e5644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -391,6 +391,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4743. TestNNStorageRetentionManager fails on Windows. (Chris Nauroth
     via suresh)
 
+    HDFS-4740. Fixes for a few test failures on Windows.
+    (Arpit Agarwal via suresh)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 2533d3e9160..a4fadc5e82f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -29,11 +29,11 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.util.Shell;
+
+import static org.junit.Assert.*;
+import org.junit.Assume;
+import static org.hamcrest.CoreMatchers.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -619,19 +619,25 @@ public void testGetNNUris() throws Exception {
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
+  }
+
+  @Test (timeout=15000)
+  public void testLocalhostReverseLookup() {
+    // 127.0.0.1 -> localhost reverse resolution does not happen on Windows.
+    Assume.assumeTrue(!Shell.WINDOWS);
 
     // Make sure when config FS_DEFAULT_NAME_KEY using IP address,
     // it will automatically convert it to hostname
-    conf = new HdfsConfiguration();
+    HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
         "hdfs://127.0.0.1:8020");
-    uris = DFSUtil.getNameServiceUris(conf);
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
     assertEquals(1, uris.size());
     for (URI uri : uris) {
-      assertFalse(uri.getHost().equals("127.0.0.1"));
+      assertThat(uri.getHost(), not("127.0.0.1"));
     }
   }
-  
-  @Test
+
+  @Test (timeout=15000)
   public void testIsValidName() {
     assertFalse(DFSUtil.isValidName("/foo/../bar"));
     assertFalse(DFSUtil.isValidName("/foo//bar"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 64c5ef4c056..cdaf9c78936 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -155,10 +155,12 @@ static void checkFullFile(FileSystem fs, Path name, final long fileSize)
   }
 
   /**
-   * Test for block size of 2GB + 512B
+   * Test for block size of 2GB + 512B. This test can take a rather long time to
+   * complete on Windows (reading the file back can be slow) so we use a larger
+   * timeout here.
    * @throws IOException in case of errors
    */
-  @Test(timeout = 120000)
+  @Test (timeout = 900000)
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index 551a37b5c0e..b084a630a4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -54,7 +54,7 @@ public void resetUGI() {
     UserGroupInformation.setConfiguration(new Configuration());
   }
 
-  @Test(timeout=4000)
+  @Test(timeout=60000)
   public void testSimpleAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
 
@@ -75,7 +75,7 @@ public void testSimpleAuthParamsInUrl() throws IOException {
         fileStatusUrl);
   }
 
-  @Test(timeout=4000)
+  @Test(timeout=60000)
   public void testSimpleProxyAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
 
@@ -98,7 +98,7 @@ public void testSimpleProxyAuthParamsInUrl() throws IOException {
         fileStatusUrl);
   }
 
-  @Test(timeout=4000)
+  @Test(timeout=60000)
   public void testSecureAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
     // fake turning on security so api thinks it should use tokens
@@ -178,7 +178,7 @@ public void testSecureAuthParamsInUrl() throws IOException {
         fileStatusUrl);
   }
 
-  @Test(timeout=4000)
+  @Test(timeout=60000)
   public void testSecureProxyAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
     // fake turning on security so api thinks it should use tokens
@@ -290,7 +290,7 @@ private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
     return (WebHdfsFileSystem) FileSystem.get(uri, conf);
   }
 
-  @Test(timeout=4000)
+  @Test(timeout=60000)
   public void testSelectHdfsDelegationToken() throws Exception {
     SecurityUtilTestHelper.setTokenServiceUseIp(true);
 
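
Reviewer note: the platform-skip pattern introduced in TestDFSUtil above is
plain JUnit 4 Assume; a failed assumption reports the test as skipped rather
than failed, so Windows runs stay green without weakening the test elsewhere.
A minimal, self-contained sketch of the same idiom (the class and method
names below are hypothetical, not part of this patch):

    import org.apache.hadoop.util.Shell;
    import org.junit.Assume;
    import org.junit.Test;

    public class TestPlatformSkipExample {
      @Test(timeout = 15000)
      public void testPosixOnlyBehavior() {
        // Shell.WINDOWS is a Hadoop constant that is true when the JVM is
        // running on Windows. If this assumption fails, JUnit marks the test
        // as ignored/skipped instead of failed.
        Assume.assumeTrue(!Shell.WINDOWS);
        // ... assertions that only hold on non-Windows platforms ...
      }
    }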