diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8a7c1595214..5e7f0d53d9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -46,6 +46,9 @@ Release 2.6.0 - UNRELEASED HDFS-4120. Add a new "-skipSharedEditsCheck" option for BootstrapStandby (Liang Xie and Rakesh R via vinayakumarb) + HDFS-6597. Add a new option to NN upgrade to terminate the process after + upgrade on NN is completed. (Danilo Vunjak via cnauroth) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 486d426bab1..8c63cc47a48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -93,7 +93,8 @@ public final class HdfsServerConstants { FORCE("-force"), NONINTERACTIVE("-nonInteractive"), RENAMERESERVED("-renameReserved"), - METADATAVERSION("-metadataVersion"); + METADATAVERSION("-metadataVersion"), + UPGRADEONLY("-upgradeOnly"); private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile( "(\\w+)\\((\\w+)\\)"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index a4b3634ba82..d9aa3dcc4ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -225,6 +225,7 @@ public class FSImage implements Closeable { 
NNStorage.checkVersionUpgradable(storage.getLayoutVersion()); } if (startOpt != StartupOption.UPGRADE + && startOpt != StartupOption.UPGRADEONLY && !RollingUpgradeStartupOption.STARTED.matches(startOpt) && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION && layoutVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) { @@ -263,6 +264,7 @@ public class FSImage implements Closeable { // 3. Do transitions switch(startOpt) { case UPGRADE: + case UPGRADEONLY: doUpgrade(target); return false; // upgrade saved image already case IMPORT: @@ -748,11 +750,13 @@ public class FSImage implements Closeable { editLog.recoverUnclosedStreams(); } else if (HAUtil.isHAEnabled(conf, nameserviceId) && (startOpt == StartupOption.UPGRADE + || startOpt == StartupOption.UPGRADEONLY || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) { // This NN is HA, but we're doing an upgrade or a rollback of rolling // upgrade so init the edit log for write. editLog.initJournalsForWrite(); - if (startOpt == StartupOption.UPGRADE) { + if (startOpt == StartupOption.UPGRADE + || startOpt == StartupOption.UPGRADEONLY) { long sharedLogCTime = editLog.getSharedLogCTime(); if (this.storage.getCTime() < sharedLogCTime) { throw new IOException("It looks like the shared log is already " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6f0e79689f6..c77ec1616c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -971,7 +971,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } // This will start a new log segment and write to the seen_txid file, so // we shouldn't do it when coming up in standby state - if (!haEnabled || (haEnabled && 
startOpt == StartupOption.UPGRADE)) { + if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE) + || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) { fsImage.openEditLogForWrite(); } success = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index a576f1c5bb1..2eef037ae8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -836,7 +836,7 @@ public class NNStorage extends Storage implements Closeable, */ void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion) throws IOException { - if (startOpt == StartupOption.UPGRADE) { + if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) { // If upgrade from a release that does not support federation, // if clusterId is provided in the startupOptions use it. 
// Else generate a new cluster ID diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 365857fe631..65190e78a88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -209,6 +209,9 @@ public class NameNode implements NameNodeStatusMXBean { + StartupOption.UPGRADE.getName() + " [" + StartupOption.CLUSTERID.getName() + " cid]" + " [" + StartupOption.RENAMERESERVED.getName() + "] ] | \n\t[" + + StartupOption.UPGRADEONLY.getName() + + " [" + StartupOption.CLUSTERID.getName() + " cid]" + + " [" + StartupOption.RENAMERESERVED.getName() + "] ] | \n\t[" + StartupOption.ROLLBACK.getName() + "] | \n\t[" + StartupOption.ROLLINGUPGRADE.getName() + " <" + RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|" @@ -712,6 +715,7 @@ public class NameNode implements NameNodeStatusMXBean { *
  *   <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
  *   <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
  *   <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
  *   upgrade and create a snapshot of the current file system state</li>
+ *   <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - upgrade the cluster, then shut down the NameNode</li>
  *   <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
  *   metadata</li>
  • @@ -766,7 +770,8 @@ public class NameNode implements NameNodeStatusMXBean { } protected HAState createHAState(StartupOption startOpt) { - if (!haEnabled || startOpt == StartupOption.UPGRADE) { + if (!haEnabled || startOpt == StartupOption.UPGRADE + || startOpt == StartupOption.UPGRADEONLY) { return ACTIVE_STATE; } else { return STANDBY_STATE; @@ -1197,8 +1202,10 @@ public class NameNode implements NameNodeStatusMXBean { startOpt = StartupOption.BACKUP; } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.CHECKPOINT; - } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) { - startOpt = StartupOption.UPGRADE; + } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) + || StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) { + startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ? + StartupOption.UPGRADE : StartupOption.UPGRADEONLY; /* Can be followed by CLUSTERID with a required parameter or * RENAMERESERVED with an optional parameter */ @@ -1402,6 +1409,12 @@ public class NameNode implements NameNodeStatusMXBean { terminate(0); return null; // avoid javac warning } + case UPGRADEONLY: { + DefaultMetricsSystem.initialize("NameNode"); + new NameNode(conf); + terminate(0); + return null; + } default: { DefaultMetricsSystem.initialize("NameNode"); return new NameNode(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java index b0d55843bb7..ef51acc0449 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java @@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertTrue; import java.net.URI; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import org.apache.hadoop.conf.Configuration; @@ -30,11 +32,15 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; /** * This class tests various upgrade cases from earlier versions to current * version with and without clusterid. */ +@RunWith(value = Parameterized.class) public class TestStartupOptionUpgrade { private Configuration conf; @@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade { private int layoutVersion; NNStorage storage; + @Parameters + public static Collection startOption() { + Object[][] params = new Object[][] { { StartupOption.UPGRADE }, + { StartupOption.UPGRADEONLY } }; + return Arrays.asList(params); + } + + public TestStartupOptionUpgrade(StartupOption startOption) { + super(); + this.startOpt = startOption; + } + @Before public void setUp() throws Exception { conf = new HdfsConfiguration(); - startOpt = StartupOption.UPGRADE; startOpt.setClusterId(null); storage = new NNStorage(conf, Collections.emptyList(),