HDFS-11085. Add unit test for NameNode failing to start when name dir is unwritable. Contributed by Xiaobing Zhou
(cherry picked from commit 0c0ab102ab)
commit 00625004b7 (parent f9a7b83a53)
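The new test relies on one trick: flip the NameNode name directory to read-only with File#setReadOnly() and expect storage loading to reject it with an InconsistentFSStateException. Below is a minimal, standalone sketch of that writability condition; the class and method names are illustrative only (not Hadoop APIs), and it assumes a non-privileged POSIX user, since a root user can still write to a read-only directory.

import java.io.File;
import java.nio.file.Files;

/**
 * Illustrative sketch only -- not part of the patch. Shows why flipping a
 * directory to read-only is enough to make it look "not accessible" to a
 * writability check of the kind NameNode storage loading performs.
 */
public class ReadOnlyNameDirSketch {

  /** A directory is usable as a name dir only if it exists and is writable. */
  static boolean isUsableNameDir(File dir) {
    return dir.exists() && dir.isDirectory() && dir.canWrite();
  }

  public static void main(String[] args) throws Exception {
    // Throwaway directory; the real test derives its path from the test name.
    File nnDir = Files.createTempDirectory("name").toFile();
    try {
      System.out.println("writable:  " + isUsableNameDir(nnDir));  // true
      // Same trick the test uses.
      nnDir.setReadOnly();
      System.out.println("read-only: " + isUsableNameDir(nnDir));  // false for a non-root POSIX user
    } finally {
      // Restore write permission so the directory can be deleted,
      // mirroring the test's finally block.
      nnDir.setWritable(true);
      nnDir.delete();
    }
  }
}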
TestStartup.java (org.apache.hadoop.hdfs.server.namenode):

@@ -19,8 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,6 +34,8 @@ import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
 import java.util.ArrayList;
+import java.nio.file.Paths;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -53,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -125,7 +132,7 @@ public class TestStartup {
         fileAsURI(new File(hdfsDir, "secondary")).toString());
     config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
         WILDCARD_HTTP_HOST + "0");
-
+
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
   }
 
@@ -698,6 +705,52 @@ public class TestStartup {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testNNFailToStartOnReadOnlyNNDir() throws Exception {
+    /* set NN dir */
+    final String nnDirStr = Paths.get(
+        hdfsDir.toString(),
+        GenericTestUtils.getMethodName(), "name").toString();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nnDirStr);
+
+    try(MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
+        .numDataNodes(1)
+        .manageNameDfsDirs(false)
+        .build()) {
+      cluster.waitActive();
+
+      /* get and verify NN dir */
+      final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config);
+      assertNotNull(nnDirs);
+      assertTrue(nnDirs.iterator().hasNext());
+      assertEquals(
+          "NN dir should be created after NN startup.",
+          nnDirStr,
+          nnDirs.iterator().next().getPath());
+      final File nnDir = new File(nnDirStr);
+      assertTrue(nnDir.exists());
+      assertTrue(nnDir.isDirectory());
+
+      try {
+        /* set read only */
+        assertTrue(
+            "Setting NN dir read only should succeed.",
+            nnDir.setReadOnly());
+        cluster.restartNameNodes();
+        fail("Restarting NN should fail on read only NN dir.");
+      } catch (InconsistentFSStateException e) {
+        assertThat(e.toString(), is(allOf(
+            containsString("InconsistentFSStateException"),
+            containsString(nnDirStr),
+            containsString("in an inconsistent state"),
+            containsString(
+                "storage directory does not exist or is not accessible."))));
+      } finally {
+        /* set back to writable in order to clean it */
+        assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true));
+      }
+    }
+  }
+
   /**
    * Verify the following scenario.
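To run just the new test locally, the usual single-test Surefire invocation should work; the module path below assumes the standard hadoop-hdfs-project/hadoop-hdfs layout.

  cd hadoop-hdfs-project/hadoop-hdfs
  mvn test -Dtest=TestStartup#testNNFailToStartOnReadOnlyNNDir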