From 51c861d3cc6d6cdfdb15b1b84da41ec858d92bc1 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Thu, 24 Jul 2014 18:30:22 +0000
Subject: [PATCH] HDFS-6715. Merge r1613237 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1613239 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../web/resources/NamenodeWebHdfsMethods.java |  3 +-
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 39 +++++++++-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java     | 76 +++++++++++++++++--
 4 files changed, 112 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 963beb50575..b229a630342 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -112,6 +112,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6455. NFS: Exception should be added in NFS log for invalid separator
     in nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
 
+    HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode
+    is in startup mode. (jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 92a58f9822e..d7235b38727 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -113,6 +113,7 @@
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.Node;
@@ -190,7 +191,7 @@ private static NamenodeProtocols getRPCServer(NameNode namenode)
       throws IOException {
     final NamenodeProtocols np = namenode.getRpcServer();
     if (np == null) {
-      throw new IOException("Namenode is in startup mode");
+      throw new RetriableException("Namenode is in startup mode");
     }
     return np;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index e9c74c6de30..14312110aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -39,14 +39,18 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /** Test WebHDFS */
 public class TestWebHDFS {
@@ -445,4 +449,37 @@ public void testWebHdfsRenameSnapshot() throws Exception {
       }
     }
   }
+
+  /**
+   * Make sure a RetriableException is thrown when rpcServer is null in
+   * NamenodeWebHdfsMethods.
+   */
+  @Test
+  public void testRaceWhileNNStartup() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final NameNode namenode = cluster.getNameNode();
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      final Path foo = new Path("/foo");
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      try {
+        webHdfs.mkdirs(foo);
+        fail("Expected RetriableException");
+      } catch (RetriableException e) {
+        GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
+            e);
+      }
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
index 772e367f93c..0340b952259 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
@@ -18,6 +18,15 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -29,18 +38,14 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHDFSForHA {
   private static final String LOGICAL_NAME = "minidfs";
@@ -182,4 +187,61 @@ public void testMultipleNamespacesConfigured() throws Exception {
       }
     }
   }
+
+  /**
+   * Make sure the WebHdfsFileSystem will retry based on RetriableException when
+   * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
+   */
+  @Test (timeout=120000)
+  public void testRetryWhileNNStartup() throws Exception {
+    final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+          .numDataNodes(0).build();
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      final NameNode namenode = cluster.getNameNode(0);
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      new Thread() {
+        @Override
+        public void run() {
+          boolean result = false;
+          FileSystem fs = null;
+          try {
+            fs = FileSystem.get(WEBHDFS_URI, conf);
+            final Path dir = new Path("/test");
+            result = fs.mkdirs(dir);
+          } catch (IOException e) {
+            result = false;
+          } finally {
+            IOUtils.cleanup(null, fs);
+          }
+          synchronized (TestWebHDFSForHA.this) {
+            resultMap.put("mkdirs", result);
+            TestWebHDFSForHA.this.notifyAll();
+          }
+        }
+      }.start();
+
+      Thread.sleep(1000);
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+      synchronized (this) {
+        while (!resultMap.containsKey("mkdirs")) {
+          this.wait();
+        }
+        Assert.assertTrue(resultMap.get("mkdirs"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
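
Note (illustrative, not part of the patch): the one-line server-side change works because
org.apache.hadoop.ipc.RetriableException extends IOException, so getRPCServer()'s callers
compile unchanged, while the client-side retry/failover policy can now tell "NameNode is
still starting, try again" apart from a fatal error. The standalone sketch below shows the
distinction a caller can make; the class and method names (RetriableExceptionSketch,
getRpcServerOrThrow) are invented for the example, and only RetriableException itself is
real Hadoop API.

import java.io.IOException;

import org.apache.hadoop.ipc.RetriableException;

public class RetriableExceptionSketch {
  // Hypothetical stand-in for NamenodeWebHdfsMethods#getRPCServer: the RPC
  // server reference is null until NameNode startup completes.
  static Object getRpcServerOrThrow(Object rpcServer) throws IOException {
    if (rpcServer == null) {
      // Before HDFS-6715 this was a plain IOException, which the WebHDFS
      // client treated as fatal; RetriableException signals "retry later".
      throw new RetriableException("Namenode is in startup mode");
    }
    return rpcServer;
  }

  public static void main(String[] args) {
    try {
      getRpcServerOrThrow(null);
    } catch (RetriableException e) {
      // A retry policy takes this branch and schedules another attempt
      // (or fails over to the other NameNode in an HA pair).
      System.out.println("retriable: " + e.getMessage());
    } catch (IOException e) {
      // Any other IOException still aborts the operation.
      System.out.println("fatal: " + e.getMessage());
    }
  }
}

This is also why the two new tests differ: TestWebHDFS asserts the server now surfaces
RetriableException, while TestWebHDFSForHA asserts the HA client turns that exception
into a successful retry once the RPC server becomes available.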