diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2358e09c22c..0706240a498 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -92,6 +92,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers.
     (todd and ivank via umamahesh)
 
+    HDFS-3695. Genericize format() to non-file JournalManagers.
+    (todd via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 6d83c96c941..8636fb72466 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -271,6 +271,23 @@ public class BookKeeperJournalManager implements JournalManager {
     }
   }
 
+  @Override
+  public void format(NamespaceInfo ns) {
+    // Currently, BKJM automatically formats itself when first accessed.
+    // TODO: change over to explicit formatting so that the admin can
+    // clear out the BK storage when reformatting a cluster.
+    LOG.info("Not formatting " + this + " - BKJM does not currently " +
+        "support reformatting. If it has not been used before, it will " +
+        "be formatted automatically upon first use.");
+  }
+
+  @Override
+  public boolean hasSomeData() throws IOException {
+    // Don't confirm format on BKJM, since format() is currently a
+    // no-op anyway
+    return false;
+  }
+
   /**
    * Start a new log segment in a BookKeeper ledger.
    * First ensure that we have the write lock for this journal.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index e224830d212..be1c154cd70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.VersionInfo;
 
 
@@ -225,7 +226,7 @@ public abstract class Storage extends StorageInfo {
    * One of the storage directories.
    */
   @InterfaceAudience.Private
-  public static class StorageDirectory {
+  public static class StorageDirectory implements FormatConfirmable {
     final File root;              // root directory
     final boolean useLock;        // flag to enable storage lock
     final StorageDirType dirType; // storage dir type
@@ -580,6 +581,32 @@ public abstract class Storage extends StorageInfo {
         throw new IOException("Unexpected FS state: " + curState);
       }
     }
+
+    /**
+     * @return true if the storage directory should prompt the user prior
+     * to formatting (i.e. if the directory appears to contain some data)
+     * @throws IOException if the SD cannot be accessed due to an IO error
+     */
+    @Override
+    public boolean hasSomeData() throws IOException {
+      // It's alright for a dir not to exist, or to exist (properly accessible)
+      // and be completely empty.
+      if (!root.exists()) return false;
+
+      if (!root.isDirectory()) {
+        // a file where you expect a directory should not cause silent
+        // formatting
+        return true;
+      }
+
+      if (FileUtil.listFiles(root).length == 0) {
+        // Empty dir can format without prompt.
+        return false;
+      }
+
+      return true;
+    }
+
     /**
      * Lock storage to provide exclusive access.
@@ -773,6 +800,68 @@ public abstract class Storage extends StorageInfo {
   }
 
+  /**
+   * Iterate over each of the {@link FormatConfirmable} objects,
+   * potentially checking with the user whether it should be formatted.
+   *
+   * If running in interactive mode, will prompt the user for each
+   * directory to allow them to format anyway. Otherwise, returns
+   * false, unless 'force' is specified.
+   *
+   * @param force format regardless of whether dirs exist
+   * @param interactive prompt the user when a dir exists
+   * @return true if formatting should proceed
+   * @throws IOException if some storage cannot be accessed
+   */
+  public static boolean confirmFormat(
+      Iterable<FormatConfirmable> items,
+      boolean force, boolean interactive) throws IOException {
+    for (FormatConfirmable item : items) {
+      if (!item.hasSomeData())
+        continue;
+      if (force) { // Don't confirm, always format.
+        System.err.println(
+            "Data exists in " + item + ". Formatting anyway.");
+        continue;
+      }
+      if (!interactive) { // Don't ask - always don't format
+        System.err.println(
+            "Running in non-interactive mode, and data appears to exist in " +
+            item + ". Not formatting.");
+        return false;
+      }
+      if (!ToolRunner.confirmPrompt("Re-format filesystem in " + item + " ?")) {
+        System.err.println("Format aborted in " + item);
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  /**
+   * Interface for classes which need to have the user confirm their
+   * formatting during NameNode -format and other similar operations.
+   *
+   * This is currently a storage directory or journal manager.
+   */
+  @InterfaceAudience.Private
+  public interface FormatConfirmable {
+    /**
+     * @return true if the storage seems to have some valid data in it,
+     * and the user should be required to confirm the format. Otherwise,
+     * false.
+     * @throws IOException if the storage cannot be accessed at all.
+     */
+    public boolean hasSomeData() throws IOException;
+
+    /**
+     * @return a string representation of the formattable item, suitable
+     * for display to the user inside a prompt
+     */
+    public String toString();
+  }
+
   /**
    * Get common storage fields.
    * Should be overloaded if additional fields need to be get.
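[Editor's aside] To see the confirm-before-format contract from the Storage.java hunk above in isolation, here is a minimal, self-contained Java sketch. It is not HDFS code: ConfirmFormatSketch and FakeJournal are invented names, NamespaceInfo is omitted, and the interactive prompt is stubbed out rather than calling ToolRunner.confirmPrompt. Only the decision logic (skip empty items, force overrides the prompt, non-interactive refuses) mirrors the confirmFormat method added above.

import java.io.IOException;
import java.util.Arrays;

/** Standalone sketch of the confirm-before-format contract; no HDFS dependencies. */
public class ConfirmFormatSketch {

  /** Mirrors the shape of Storage.FormatConfirmable from the patch. */
  interface FormatConfirmable {
    boolean hasSomeData() throws IOException;
  }

  /** Invented stand-in for a storage directory or journal. */
  static class FakeJournal implements FormatConfirmable {
    private final boolean hasData;
    FakeJournal(boolean hasData) { this.hasData = hasData; }
    @Override
    public boolean hasSomeData() { return hasData; }
    @Override
    public String toString() { return "FakeJournal(hasData=" + hasData + ")"; }
  }

  /** Same decision logic as the patched confirmFormat, prompt stubbed out. */
  static boolean confirmFormat(Iterable<FormatConfirmable> items,
      boolean force, boolean interactive) throws IOException {
    for (FormatConfirmable item : items) {
      if (!item.hasSomeData()) {
        continue;                 // empty item: format silently
      }
      if (force) {
        System.err.println("Data exists in " + item + ". Formatting anyway.");
        continue;                 // -force overrides the prompt
      }
      if (!interactive) {
        System.err.println("Non-interactive and data exists in " + item
            + ". Not formatting.");
        return false;             // refuse rather than prompt
      }
      // A real implementation would ask the user here (ToolRunner.confirmPrompt).
      return false;
    }
    return true;
  }

  public static void main(String[] args) throws IOException {
    Iterable<FormatConfirmable> items = Arrays.<FormatConfirmable>asList(
        new FakeJournal(false), new FakeJournal(true));
    // Prints "false": one item has data, and we are neither forcing nor interactive.
    System.out.println(confirmFormat(items, false, false));
  }
}

The design point the sketch highlights: the prompt keys off hasSomeData() rather than simple directory existence, which is what lets non-file journals participate in the same confirmation pass as StorageDirectories.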
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
index 2f6fe8cbde2..5420b129cab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
@@ -22,6 +22,7 @@ import java.util.Collection;
 
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 
 /**
  * A JournalManager implementation that uses RPCs to log transactions
@@ -38,6 +39,20 @@ class BackupJournalManager implements JournalManager {
     this.bnReg = bnReg;
   }
 
+  @Override
+  public void format(NamespaceInfo nsInfo) {
+    // format() should only get called at startup, before any BNs
+    // can register with the NN.
+    throw new UnsupportedOperationException(
+        "BackupNode journal should never get formatted");
+  }
+
+  @Override
+  public boolean hasSomeData() {
+    throw new UnsupportedOperationException();
+  }
+
+
   @Override
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
     EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 3d231d23b55..f313becc45e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
@@ -313,6 +314,39 @@ public class FSEditLog implements LogsPurgeable {
     state = State.CLOSED;
   }
 
+
+  /**
+   * Format all configured journals which are not file-based.
+   *
+   * File-based journals are skipped, since they are formatted by the
+   * Storage format code.
+   */
+  void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
+    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
+        "Bad state: %s", state);
+
+    for (JournalManager jm : journalSet.getJournalManagers()) {
+      if (!(jm instanceof FileJournalManager)) {
+        jm.format(nsInfo);
+      }
+    }
+  }
+
+  List<FormatConfirmable> getFormatConfirmables() {
+    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
+        "Bad state: %s", state);
+
+    List<FormatConfirmable> ret = Lists.newArrayList();
+    for (final JournalManager jm : journalSet.getJournalManagers()) {
+      // The FJMs are confirmed separately since they are also
+      // StorageDirectories
+      if (!(jm instanceof FileJournalManager)) {
+        ret.add(jm);
+      }
+    }
+    return ret;
+  }
+
   /**
    * Write an operation to the edit log. Do not sync to persistent
    * store yet.
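[Editor's aside] The FSEditLog.formatNonFileJournals hunk above dispatches format() only to journals that are not FileJournalManagers, because file-based journals are formatted through their StorageDirectory instead. The following is a small stand-alone sketch of that dispatch; Journal, FileJournal, and RemoteJournal are invented stand-ins (not HDFS classes), and NamespaceInfo is reduced to a String.

import java.util.Arrays;
import java.util.List;

/** Self-contained sketch of the "format only non-file journals" dispatch. */
public class FormatDispatchSketch {

  interface Journal {                          // stands in for JournalManager
    void format(String namespaceInfo);
  }

  static class FileJournal implements Journal { // stands in for FileJournalManager
    @Override
    public void format(String ns) {
      // File journals are formatted via the storage-directory code path.
      throw new UnsupportedOperationException("formatted via StorageDirectory");
    }
  }

  static class RemoteJournal implements Journal { // e.g. a BKJM/QJM-style plugin
    @Override
    public void format(String ns) {
      System.out.println("remote journal formatted for " + ns);
    }
  }

  static void formatNonFileJournals(List<Journal> journals, String ns) {
    for (Journal j : journals) {
      if (!(j instanceof FileJournal)) {       // same instanceof test as the patch
        j.format(ns);
      }
    }
  }

  public static void main(String[] args) {
    formatNonFileJournals(
        Arrays.<Journal>asList(new FileJournal(), new RemoteJournal()), "cid-123");
  }
}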
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index b12f6e6ebd2..caf7e6d92a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -138,10 +139,33 @@ public class FSImage implements Closeable {
         fileCount + " files");
     NamespaceInfo ns = NNStorage.newNamespaceInfo();
     ns.clusterID = clusterId;
+
     storage.format(ns);
+    editLog.formatNonFileJournals(ns);
     saveFSImageInAllDirs(fsn, 0);
   }
 
+  /**
+   * Check whether the storage directories and non-file journals exist.
+   * If running in interactive mode, will prompt the user for each
+   * directory to allow them to format anyway. Otherwise, returns
+   * false, unless 'force' is specified.
+   *
+   * @param force format regardless of whether dirs exist
+   * @param interactive prompt the user when a dir exists
+   * @return true if formatting should proceed
+   * @throws IOException if some storage cannot be accessed
+   */
+  boolean confirmFormat(boolean force, boolean interactive) throws IOException {
+    List<FormatConfirmable> confirms = Lists.newArrayList();
+    for (StorageDirectory sd : storage.dirIterable(null)) {
+      confirms.add(sd);
+    }
+
+    confirms.addAll(editLog.getFormatConfirmables());
+    return Storage.confirmFormat(confirms, force, interactive);
+  }
+
   /**
    * Analyze storage directories.
    * Recover from previous transitions if required.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index ce779fda800..51718a7bf89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -77,6 +78,22 @@ class FileJournalManager implements JournalManager {
   @Override 
   public void close() throws IOException {}
 
+  @Override
+  public void format(NamespaceInfo ns) {
+    // Formatting file journals is done by the StorageDirectory
+    // format code, since they may share their directory with
+    // checkpoints, etc.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean hasSomeData() {
+    // Formatting file journals is done by the StorageDirectory
+    // format code, since they may share their directory with
+    // checkpoints, etc.
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   synchronized public EditLogOutputStream startLogSegment(long txid) 
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index 61c65e5a349..e4b8d9d2051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 
 /**
  * A JournalManager is responsible for managing a single place of storing
@@ -32,7 +34,15 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public interface JournalManager extends Closeable, LogsPurgeable {
+public interface JournalManager extends Closeable, LogsPurgeable,
+    FormatConfirmable {
+
+  /**
+   * Format the underlying storage, removing any previously
+   * stored data.
+   */
+  void format(NamespaceInfo ns);
+
   /**
    * Begin writing to a new segment of the log stream, which starts at
    * the given transaction ID.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index 7a74296d58b..416663c6b06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -29,6 +29,7 @@ import java.util.SortedSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 
@@ -170,6 +171,20 @@ public class JournalSet implements JournalManager {
     this.minimumRedundantJournals = minimumRedundantResources;
   }
 
+  @Override
+  public void format(NamespaceInfo nsInfo) {
+    // The iteration is done by FSEditLog itself
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean hasSomeData() throws IOException {
+    // This is called individually on the underlying journals,
+    // not on the JournalSet.
+    throw new UnsupportedOperationException();
+  }
+
+
   @Override
   public EditLogOutputStream startLogSegment(final long txId) throws IOException {
     mapJournalsAndReportErrors(new JournalClosure() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index b72e24dabc8..abc871fa9f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -28,7 +28,6 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -55,7 +54,6 @@ import org.apache.hadoop.util.Time;
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 /**
  * NNStorage is responsible for management of the StorageDirectories used by
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7f72d5a5ce9..43148872c09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -731,9 +732,6 @@ public class NameNode {
     dirsToPrompt.addAll(sharedDirs);
     List<URI> editDirsToFormat = FSNamesystem.getNamespaceEditsDirs(conf);
-    if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
-      return true; // aborted
-    }
 
     // if clusterID is not provided - see if you can find the current one
     String clusterId = StartupOption.FORMAT.getClusterId();
@@ -745,62 +743,16 @@ public class NameNode {
 
     FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsImage.getEditLog().initJournalsForWrite();
+
+    if (!fsImage.confirmFormat(force, isInteractive)) {
+      return true; // aborted
+    }
+
     fsImage.format(fsn, clusterId);
     return false;
   }
 
-  /**
-   * Check whether the given storage directories already exist.
-   * If running in interactive mode, will prompt the user for each
-   * directory to allow them to format anyway. Otherwise, returns
-   * false, unless 'force' is specified.
-   *
-   * @param dirsToFormat the dirs to check
-   * @param force format regardless of whether dirs exist
-   * @param interactive prompt the user when a dir exists
-   * @return true if formatting should proceed
-   * @throws IOException
-   */
-  public static boolean confirmFormat(Collection<URI> dirsToFormat,
-      boolean force, boolean interactive)
-      throws IOException {
-    for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
-      URI dirUri = it.next();
-      if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
-        System.err.println("Skipping format for directory \"" + dirUri
-            + "\". Can only format local directories with scheme \""
-            + NNStorage.LOCAL_URI_SCHEME + "\".");
-        continue;
-      }
-      // To validate only file based schemes are formatted
-      assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
-        "formatting is not supported for " + dirUri;
-
-      File curDir = new File(dirUri.getPath());
-      // Its alright for a dir not to exist, or to exist (properly accessible)
-      // and be completely empty.
-      if (!curDir.exists() ||
-          (curDir.isDirectory() && FileUtil.listFiles(curDir).length == 0))
-        continue;
-      if (force) { // Don't confirm, always format.
-        System.err.println(
-            "Storage directory exists in " + curDir + ". Formatting anyway.");
-        continue;
-      }
-      if (!interactive) { // Don't ask - always don't format
-        System.err.println(
-            "Running in non-interactive mode, and image appears to exist in " +
-            curDir + ". Not formatting.");
-        return false;
-      }
-      if (!confirmPrompt("Re-format filesystem in " + curDir + " ?")) {
-        System.err.println("Format aborted in " + curDir);
-        return false;
-      }
-    }
-    return true;
-  }
-
   public static void checkAllowFormat(Configuration conf) throws IOException {
     if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
                          DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
@@ -853,17 +805,26 @@ public class NameNode {
           FSNamesystem.getNamespaceEditsDirs(conf, false));
 
       existingStorage = fsns.getFSImage().getStorage();
+      NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();
 
-      Collection<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
-      if (!confirmFormat(sharedEditsDirs, force, interactive)) {
-        return true; // aborted
-      }
-      NNStorage newSharedStorage = new NNStorage(conf,
+      List<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
+
+      FSImage sharedEditsImage = new FSImage(conf,
           Lists.<URI>newArrayList(),
           sharedEditsDirs);
+      sharedEditsImage.getEditLog().initJournalsForWrite();
 
-      newSharedStorage.format(existingStorage.getNamespaceInfo());
+      if (!sharedEditsImage.confirmFormat(force, interactive)) {
+        return true; // abort
+      }
+
+      NNStorage newSharedStorage = sharedEditsImage.getStorage();
+
+      // Call Storage.format instead of FSImage.format here, since we don't
+      // actually want to save a checkpoint - just prime the dirs with
+      // the existing namespace info
+      newSharedStorage.format(nsInfo);
+      sharedEditsImage.getEditLog().formatNonFileJournals(nsInfo);
+
       // Need to make sure the edit log segments are in good shape to initialize
       // the shared edits dir.
      fsns.getFSImage().getEditLog().close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index f038305a5a1..e9549ce8f18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -55,7 +56,6 @@ import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
 
 /**
  * Tool which allows the standby node's storage directories to be bootstrapped
@@ -171,19 +171,18 @@ public class BootstrapStandby implements Tool, Configurable {
         "  Layout version: " + nsInfo.getLayoutVersion() + "\n" +
         "=====================================================");
 
+    long imageTxId = proxy.getMostRecentCheckpointTxId();
+    long curTxId = proxy.getTransactionID();
+
+    NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
+
     // Check with the user before blowing away data.
-    if (!NameNode.confirmFormat(
-            Sets.union(Sets.newHashSet(dirsToFormat),
-                Sets.newHashSet(editUrisToFormat)),
+    if (!Storage.confirmFormat(storage.dirIterable(null),
             force, interactive)) {
       return ERR_CODE_ALREADY_FORMATTED;
     }
-
-    long imageTxId = proxy.getMostRecentCheckpointTxId();
-    long curTxId = proxy.getTransactionID();
 
     // Format the storage (writes VERSION file)
-    NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
     storage.format(nsInfo);
 
     // Load the newly formatted image, using all of the directories (including shared
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index 1ff208fa3e9..276b557ccbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -20,12 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.fail;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
 
+import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -125,6 +127,8 @@ public class TestGenericJournalConf {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
 
+      assertTrue(DummyJournalManager.shouldPromptCalled);
+      assertTrue(DummyJournalManager.formatCalled);
       assertNotNull(DummyJournalManager.conf);
       assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
       assertNotNull(DummyJournalManager.nsInfo);
@@ -141,6 +145,8 @@ public class TestGenericJournalConf {
     static Configuration conf = null;
     static URI uri = null;
     static NamespaceInfo nsInfo = null;
+    static boolean formatCalled = false;
+    static boolean shouldPromptCalled = false;
     
     public DummyJournalManager(Configuration conf, URI u,
         NamespaceInfo nsInfo) {
@@ -150,6 +156,11 @@ public class TestGenericJournalConf {
       DummyJournalManager.nsInfo = nsInfo;
     }
 
+    @Override
+    public void format(NamespaceInfo nsInfo) {
+      formatCalled = true;
+    }
+
     @Override
     public EditLogOutputStream startLogSegment(long txId) throws IOException {
       return mock(EditLogOutputStream.class);
@@ -178,6 +189,12 @@ public class TestGenericJournalConf {
 
     @Override
     public void close() throws IOException {}
+
+    @Override
+    public boolean hasSomeData() throws IOException {
+      shouldPromptCalled = true;
+      return false;
+    }
   }
 
   public static class BadConstructorJournalManager extends DummyJournalManager {
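[Editor's aside] For context on the test above: a pluggable journal manager such as DummyJournalManager is wired in through the journal-plugin configuration key for its URI scheme, and with this patch the NameNode also calls format() and hasSomeData() on it during -format. The sketch below shows that wiring under stated assumptions: the constant names are taken from DFSConfigKeys as used by FSEditLog when it instantiates plugins, while the dummy://test URI and the inner-class name are illustrative values borrowed from the test, not a recommended configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

/** Sketch of registering a journal-plugin scheme for the edits dir. */
public class RegisterJournalPluginSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // "dummy" is the URI scheme; the value names the JournalManager implementation
    // (inner-class name here is illustrative).
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
        "org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf$DummyJournalManager");

    // Point the edits dir at a URI with that scheme; the URI is illustrative.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "dummy://test");

    System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY));
  }
}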