HDFS-6572. Merging change r1605928 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1605930 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth, 2014-06-26 23:16:12 +00:00
parent 1e852f955f, commit 816b152c4a
5 changed files with 132 additions and 13 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -225,6 +225,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6595. Allow the maximum threads for balancing on datanodes to be
     configurable. (Benoy Antony via szetszwo)
 
+    HDFS-6572. Add an option to the NameNode that prints the software and
+    on-disk image versions. (Charles Lamb via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
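
For orientation before the code hunks: the new option is passed to the NameNode startup command, prints two version numbers, and exits without loading the namespace. An illustrative session follows; the printed values are release-dependent, and -57 is shown only as a plausible layout version for a 2.5.0-era NameNode, not a value taken from this commit.

    $ hdfs namenode -metadataVersion
    HDFS Image Version: -57
    Software format version: -57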

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -92,7 +92,8 @@ public final class HdfsServerConstants {
     RECOVER  ("-recover"),
     FORCE("-force"),
     NONINTERACTIVE("-nonInteractive"),
-    RENAMERESERVED("-renameReserved");
+    RENAMERESERVED("-renameReserved"),
+    METADATAVERSION("-metadataVersion");
 
     private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
         "(\\w+)\\((\\w+)\\)");

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -214,6 +214,13 @@ public class FSImage implements Closeable {
     int layoutVersion = storage.getLayoutVersion();
 
+    if (startOpt == StartupOption.METADATAVERSION) {
+      System.out.println("HDFS Image Version: " + layoutVersion);
+      System.out.println("Software format version: " +
+        HdfsConstants.NAMENODE_LAYOUT_VERSION);
+      return false;
+    }
+
     if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
       NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
     }
@@ -289,6 +296,12 @@ public class FSImage implements Closeable {
         storage.dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       StorageState curState;
+      if (startOpt == StartupOption.METADATAVERSION) {
+        /* All we need is the layout version. */
+        storage.readProperties(sd);
+        return true;
+      }
+
       try {
         curState = sd.analyzeStorage(startOpt, storage);
         // sd is locked but not opened
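
Read together, the two FSImage hunks turn this startup mode into a pure query. Below is a sketch of the apparent flow; the enclosing method of the second hunk is not named in this diff, and recoverTransitionRead() as the home of the first hunk is inferred from printMetadataVersion() in NameNode.java further down:

    // Assumed flow under StartupOption.METADATAVERSION:
    FSImage fsImage = new FSImage(conf);
    FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
    // 1. The storage-directory loop (second hunk) returns as soon as it has
    //    read the VERSION properties of the first directory, before
    //    analyzeStorage() locks anything.
    // 2. recoverTransitionRead() (first hunk) then prints the on-disk and
    //    software versions and returns false, so no fsimage or edit log
    //    is ever loaded.
    fsImage.recoverTransitionRead(StartupOption.METADATAVERSION, fs, null);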

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -200,25 +200,28 @@ public class NameNode implements NameNodeStatusMXBean {
   };
 
   private static final String USAGE = "Usage: java NameNode ["
-      + StartupOption.BACKUP.getName() + "] | ["
-      + StartupOption.CHECKPOINT.getName() + "] | ["
+      + StartupOption.BACKUP.getName() + "] | \n\t["
+      + StartupOption.CHECKPOINT.getName() + "] | \n\t["
       + StartupOption.FORMAT.getName() + " ["
       + StartupOption.CLUSTERID.getName() + " cid ] ["
       + StartupOption.FORCE.getName() + "] ["
-      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+      + StartupOption.NONINTERACTIVE.getName() + "] ] | \n\t["
       + StartupOption.UPGRADE.getName() +
         " [" + StartupOption.CLUSTERID.getName() + " cid]" +
-        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
-      + StartupOption.ROLLBACK.getName() + "] | ["
+        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+      + StartupOption.ROLLBACK.getName() + "] | \n\t["
       + StartupOption.ROLLINGUPGRADE.getName() + " <"
       + RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
-      + RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | ["
-      + StartupOption.FINALIZE.getName() + "] | ["
-      + StartupOption.IMPORT.getName() + "] | ["
-      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
-      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
-      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
-      + " ] ]";
+      + RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "> ] | \n\t["
+      + StartupOption.FINALIZE.getName() + "] | \n\t["
+      + StartupOption.IMPORT.getName() + "] | \n\t["
+      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
+      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t["
+      + StartupOption.RECOVER.getName() + " [ "
+      + StartupOption.FORCE.getName() + "] ] | \n\t["
+      + StartupOption.METADATAVERSION.getName() + " ] "
+      + " ]";
 
   public long getProtocolVersion(String protocol,
                                  long clientVersion) throws IOException {
@@ -1265,6 +1268,8 @@ public class NameNode implements NameNodeStatusMXBean {
             "can't understand option \"" + args[i] + "\"");
         }
       }
+    } else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) {
+      startOpt = StartupOption.METADATAVERSION;
     } else {
       return null;
     }
@@ -1317,6 +1322,21 @@ public class NameNode implements NameNodeStatusMXBean {
     }
   }
 
+  /**
+   * Verify that configured directories exist, then print the metadata versions
+   * of the software and the image.
+   *
+   * @param conf configuration to use
+   * @throws IOException
+   */
+  private static boolean printMetadataVersion(Configuration conf)
+      throws IOException {
+    final FSImage fsImage = new FSImage(conf);
+    final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
+    return fsImage.recoverTransitionRead(
+        StartupOption.METADATAVERSION, fs, null);
+  }
+
   public static NameNode createNameNode(String argv[], Configuration conf)
       throws IOException {
     LOG.info("createNameNode " + Arrays.asList(argv));
@@ -1377,6 +1397,11 @@ public class NameNode implements NameNodeStatusMXBean {
         NameNode.doRecovery(startOpt, conf);
         return null;
       }
+      case METADATAVERSION: {
+        printMetadataVersion(conf);
+        terminate(0);
+        return null; // avoid javac warning
+      }
       default: {
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
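
A note on terminate(0) in the new case block: NameNode.java statically imports org.apache.hadoop.util.ExitUtil.terminate, and once System.exit() has been disabled through ExitUtil (MiniDFSCluster does this so tests survive exits), terminate() throws ExitUtil.ExitException instead of killing the JVM. A minimal sketch of the behavior the new test relies on:

    // Sketch of the ExitUtil behavior assumed by TestMetadataVersionOutput.
    ExitUtil.disableSystemExit();    // MiniDFSCluster arranges this in tests
    try {
      ExitUtil.terminate(0);         // the call made by the METADATAVERSION case
    } catch (ExitUtil.ExitException ee) {
      // terminate() throws rather than exits, so the caller can assert
      // that the exception message contains "ExitException".
    }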

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java (new file)

@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestMetadataVersionOutput {
+
+  private MiniDFSCluster dfsCluster = null;
+  private final Configuration conf = new Configuration();
+
+  @Before
+  public void setUp() throws Exception {
+    dfsCluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(1).
+        checkExitOnShutdown(false).
+        build();
+    dfsCluster.waitClusterUp();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    Thread.sleep(2000);
+  }
+
+  @Test(timeout = 30000)
+  public void testMetadataVersionOutput() throws IOException {
+
+    final PrintStream origOut = System.out;
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    final PrintStream stdOut = new PrintStream(baos);
+    System.setOut(stdOut);
+    try {
+      NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
+    } catch (Exception e) {
+      assertExceptionContains("ExitException", e);
+    }
+
+    /* Check if meta data version is printed correctly. */
+    final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
+    assertTrue(baos.toString("UTF-8").
+        contains("HDFS Image Version: " + verNumStr));
+    assertTrue(baos.toString("UTF-8").
+        contains("Software format version: " + verNumStr));
+    System.setOut(origOut);
+  }
+}
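
One design note on the stdout capture above: System.setOut(origOut) runs only if both assertions pass, so a failing assertion would leave System.out redirected for any later test in the same JVM. A variant that restores it unconditionally, offered as a sketch rather than a change to this patch:

    // Sketch: same capture, but stdout is restored in a finally block.
    final PrintStream origOut = System.out;
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    System.setOut(new PrintStream(baos));
    try {
      NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
    } catch (Exception e) {
      assertExceptionContains("ExitException", e);
    } finally {
      System.setOut(origOut);   // runs even if the assert in the catch fails
    }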