HDFS-5939. WebHdfs returns misleading error code and logs nothing if trying to create a file with no DNs in cluster. Contributed by Yongjun Zhang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1571781 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-02-25 18:36:46 +00:00
parent 5a42e1b7c3
commit df6e1ab491
4 changed files with 42 additions and 3 deletions
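For context on what this fix changes for users, here is a minimal client-side sketch (not part of the patch): it attempts a WebHDFS create against a cluster that has no live datanodes and prints the resulting error. The class name, namenode host/port, and file path are illustrative placeholders; only the "Failed to find datanode" message text comes from this patch.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsNoDatanodeDemo {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Placeholder namenode address; any WebHDFS-enabled cluster works.
    final FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);
    try {
      // With no datanodes registered, the namenode cannot pick a redirect
      // target for the create, so the call fails.
      fs.create(new Path("/tmp/no-datanode-demo")).close();
      System.out.println("create succeeded; the cluster has live datanodes");
    } catch (IOException ioe) {
      // After HDFS-5939 the message should mention "Failed to find datanode"
      // instead of surfacing a misleading error code with nothing logged.
      System.err.println("create failed: " + ioe.getMessage());
    }
  }
}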

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -712,6 +712,11 @@ public class NetworkTopology {
         numOfDatanodes -= ((InnerNode)node).getNumOfLeaves();
       }
     }
+    if (numOfDatanodes == 0) {
+      throw new InvalidTopologyException(
+          "Failed to find datanode (scope=\"" + String.valueOf(scope) +
+          "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
+    }
     int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
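The guard above makes the failure explicit: before this change, an empty (or fully excluded) scope left numOfDatanodes at 0 and the subsequent r.nextInt(0) failed with a bare IllegalArgumentException that said nothing about the topology. Below is a minimal sketch of the new behavior, assuming only the public NetworkTopology#chooseRandom(String) entry point; the class name is illustrative.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.NodeBase;

public class EmptyTopologyDemo {
  public static void main(String[] args) {
    // A topology with no leaves, e.g. a cluster where no datanode has registered yet.
    final NetworkTopology topology = new NetworkTopology();
    try {
      topology.chooseRandom(NodeBase.ROOT);
    } catch (InvalidTopologyException ite) {
      // With this change the error names the scope that had no datanodes,
      // e.g. Failed to find datanode (scope="" excludedScope="null").
      System.err.println(ite.getMessage());
    }
  }
}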

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -433,6 +433,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5935. New Namenode UI FS browser should throw smarter error messages.
     (Travis Thompson via jing9)
 
+    HDFS-5939. WebHdfs returns misleading error code and logs nothing if trying
+    to create a file with no DNs in cluster. (Yongjun Zhang via jing9)
+
   OPTIMIZATIONS
 
   HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -103,6 +103,7 @@ import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -244,8 +245,12 @@ public class NamenodeWebHdfsMethods {
       final String path, final HttpOpParam.Op op, final long openOffset,
       final long blocksize,
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
-    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset,
-        blocksize);
+    final DatanodeInfo dn;
+    try {
+      dn = chooseDatanode(namenode, path, op, openOffset, blocksize);
+    } catch (InvalidTopologyException ite) {
+      throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
+    }
 
     final String delegationQuery;
     if (!UserGroupInformation.isSecurityEnabled()) {
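This server-side change wraps the unchecked InvalidTopologyException in a checked IOException with an actionable message, keeping the original exception as the cause so the topology detail is preserved for logs. Below is a standalone sketch of that translation pattern; chooseDatanodeOrFail is a hypothetical stand-in for the namenode's datanode selection, not a method in the patch.

import java.io.IOException;

import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;

public class ExceptionTranslationSketch {
  // Hypothetical stand-in for the namenode-side datanode selection.
  private static void chooseDatanodeOrFail() {
    throw new InvalidTopologyException(
        "Failed to find datanode (scope=\"\" excludedScope=\"null\").");
  }

  public static void main(String[] args) {
    try {
      try {
        chooseDatanodeOrFail();
      } catch (InvalidTopologyException ite) {
        // Same pattern as the patch: translate the unchecked topology failure
        // into a checked IOException that the WebHDFS layer reports clearly.
        throw new IOException(
            "Failed to find datanode, suggest to check cluster health.", ite);
      }
    } catch (IOException ioe) {
      System.err.println(ioe.getMessage());
      System.err.println("root cause: " + ioe.getCause());
    }
  }
}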

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -36,9 +36,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
@@ -289,6 +290,31 @@ public class TestWebHDFS {
     }
   }
 
+  /**
+   * Test for catching "no datanode" IOException, when to create a file
+   * but datanode is not running for some reason.
+   */
+  @Test(timeout=300000)
+  public void testCreateWithNoDN() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      cluster.waitActive();
+      FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      fs.create(new Path("/testnodatanode"));
+      Assert.fail("No exception was thrown");
+    } catch (IOException ex) {
+      GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * WebHdfs should be enabled by default after HDFS-5532
    *