diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index acf600d08cb..ed3632ec85d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -164,6 +164,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-1765. Block Replication should respect under-replication
     block priority. (Uma Maheswara Rao G via eli)
 
+    HDFS-2285. BackupNode should reject requests to modify namespace.
+    (shv and Uma Maheswara Rao)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 8aab4bfd46f..a5abd1b073d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
@@ -142,6 +143,10 @@ protected void initialize(Configuration conf) throws IOException {
         CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
     NamespaceInfo nsInfo = handshake(conf);
     super.initialize(conf);
+    if (false == namesystem.isInSafeMode()) {
+      namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    }
+
     // Backup node should never do lease recovery,
     // therefore lease hard limit should never expire.
     namesystem.leaseManager.setLeasePeriod(
@@ -195,7 +200,12 @@ public void stop() {
     // Stop name-node threads
     super.stop();
   }
-
+
+  /* @Override */// NameNode
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    throw new UnsupportedActionException("setSafeMode");
+  }
+
   static class BackupNodeRpcServer extends NameNodeRpcServer implements
       JournalProtocol {
     private final String nnRpcAddress;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index a4ac99bfddb..39d2abaee75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -240,8 +240,12 @@ void doCheckpoint() throws IOException {
 
       rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
     }
-
+
     long txid = bnImage.getLastAppliedTxId();
+
+    backupNode.namesystem.dir.setReady();
+    backupNode.namesystem.setBlockTotal();
+
     bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
     bnStorage.writeAll();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 311e1621266..25fd12d1c7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -159,6 +159,11 @@ private BlockManager getBlockManager() {
    */
   void imageLoadComplete() {
     Preconditions.checkState(!ready, "FSDirectory already loaded");
+    setReady();
+  }
+
+  void setReady() {
+    if(ready) return;
     writeLock();
     try {
       setReady(true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ba52a3d8965..2fbf18ea1bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3375,7 +3375,7 @@ public void decrementSafeBlockCount(Block b) {
   /**
    * Set the total number of blocks in the system.
    */
-  private void setBlockTotal() {
+  void setBlockTotal() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 8de92d0eb81..fc389e9fa5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -21,9 +21,12 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Collections;
 import java.util.List;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -31,13 +34,13 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
@@ -244,8 +247,11 @@ public void testBackupNode() throws Exception {
   void testCheckpoint(StartupOption op) throws Exception {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
+    Path file3 = new Path("backup.dat");
 
     Configuration conf = new HdfsConfiguration();
+    short replication = (short)conf.getInt("dfs.replication", 3);
+    int numDatanodes = Math.max(3, replication);
     conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
@@ -293,7 +299,7 @@ void testCheckpoint(StartupOption op) throws Exception {
       //
       // Restart cluster and verify that file1 still exist.
       //
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
           .format(false).build();
       fileSys = cluster.getFileSystem();
       // check that file1 still exists
@@ -322,6 +328,26 @@ void testCheckpoint(StartupOption op) throws Exception {
       backup.doCheckpoint();
       waitCheckpointDone(cluster, txid);
 
+      // Try BackupNode operations
+      InetSocketAddress add = backup.getNameNodeAddress();
+      // Write to BN
+      FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+          + NameNode.getHostPortString(add)).toUri(), conf);
+      boolean canWrite = true;
+      try {
+        TestCheckpoint.writeFile(bnFS, file3, replication);
+      } catch (IOException eio) {
+        LOG.info("Write to BN failed as expected: ", eio);
+        canWrite = false;
+      }
+      assertFalse("Write to BackupNode must be prohibited.", canWrite);
+
+      TestCheckpoint.writeFile(fileSys, file3, replication);
+      TestCheckpoint.checkFile(fileSys, file3, replication);
+      // should also be on BN right away
+      assertTrue("file3 does not exist on BackupNode",
+          op != StartupOption.BACKUP || bnFS.exists(file3));
+
     } catch(IOException e) {
      LOG.error("Error in TestBackupNode:", e);
      assertTrue(e.getLocalizedMessage(), false);
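---
Reviewer note (not part of the patch): the TestBackupNode changes above exercise
the new behavior through MiniDFSCluster. As a minimal standalone sketch of the
same check, a client that points a FileSystem at a running BackupNode's RPC
address should now fail every namespace mutation, because the BN enters safe
mode in initialize() and its setSafeMode() override refuses to leave it. The
address (localhost:50100, the usual dfs.namenode.backup.address default), the
class name, and the probe path below are illustrative assumptions only.

// Illustrative sketch only -- not part of the patch. Assumes a BackupNode
// is listening on localhost:50100; adjust to your dfs.namenode.backup.address.
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BackupNodeWriteProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Point a DFS client directly at the BackupNode's RPC address.
    FileSystem bnFS = FileSystem.get(URI.create("hdfs://localhost:50100"), conf);
    try {
      // Any namespace mutation must now fail: the BN stays in safe mode,
      // and its setSafeMode() throws UnsupportedActionException, so safe
      // mode can never be switched off on it.
      bnFS.create(new Path("/probe.dat")).close();
      System.err.println("Unexpected: BackupNode accepted a write");
    } catch (IOException expected) {
      // SafeModeException arrives as a RemoteException (an IOException).
      System.out.println("Write rejected as expected: " + expected);
    }
  }
}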