HDFS-7132. hdfs namenode -metadataVersion command does not honor configured name dirs. Contributed by Charles Lamb.

This commit is contained in:
Andrew Wang 2014-09-23 14:10:02 -07:00
parent 3dc28e2052
commit f48686a1ad
3 changed files with 28 additions and 10 deletions

View File

@ -795,6 +795,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7001. Tests in TestTracing depends on the order of execution
(iwasakims via cmccabe)
HDFS-7132. hdfs namenode -metadataVersion command does not honor
configured name dirs. (Charles Lamb via wang)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an

View File

@ -1347,6 +1347,9 @@ public class NameNode implements NameNodeStatusMXBean {
*/
private static boolean printMetadataVersion(Configuration conf)
throws IOException {
final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
NameNode.initializeGenericKeys(conf, nsId, namenodeId);
final FSImage fsImage = new FSImage(conf);
final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
return fsImage.recoverTransitionRead(

View File

@ -25,27 +25,22 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
public class TestMetadataVersionOutput {
private MiniDFSCluster dfsCluster = null;
private final Configuration conf = new Configuration();
@Before
public void setUp() throws Exception {
// Spin up a single-datanode mini cluster before each test.
// checkExitOnShutdown(false) keeps the test JVM alive even if a
// daemon calls System.exit during shutdown.
dfsCluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).
checkExitOnShutdown(false).
build();
// Block until the NameNode reports the cluster is up.
dfsCluster.waitClusterUp();
}
@After
public void tearDown() throws Exception {
if (dfsCluster != null) {
@ -54,9 +49,26 @@ public class TestMetadataVersionOutput {
Thread.sleep(2000);
}
// Configure a nameservice "ns1" with a single NameNode "nn1" whose
// name directory is set ONLY under the nameservice/namenode-suffixed
// key. The generic (unsuffixed) name-dir key is explicitly unset, so
// "hdfs namenode -metadataVersion" can only find the name dirs if it
// resolves the suffixed keys via initializeGenericKeys — the exact
// regression covered by HDFS-7132.
private void initConfig() {
conf.set(DFS_NAMESERVICE_ID, "ns1");
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + ".ns1", "nn1");
conf.set(DFS_HA_NAMENODE_ID_KEY, "nn1");
// Name dir lives under dfs.namenode.name.dir.ns1.nn1 only.
conf.set(DFS_NAMENODE_NAME_DIR_KEY + ".ns1.nn1", MiniDFSCluster.getBaseDirectory() + "1");
// Remove the generic key so the suffixed lookup is the only path.
conf.unset(DFS_NAMENODE_NAME_DIR_KEY);
}
@Test(timeout = 30000)
public void testMetadataVersionOutput() throws IOException {
initConfig();
dfsCluster = new MiniDFSCluster.Builder(conf).
manageNameDfsDirs(false).
numDataNodes(1).
checkExitOnShutdown(false).
build();
dfsCluster.waitClusterUp();
dfsCluster.shutdown(false);
initConfig();
final PrintStream origOut = System.out;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream stdOut = new PrintStream(baos);