diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c59c1dfc55f..af93e845cd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -417,9 +417,6 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-1245. Pluggable block id generation. (shv)
 
-    HDFS-4288. NN accepts incremental BR as IBR in safemode
-    (Daryn Sharp via todd)
-
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c3b98692f63..02ba8992339 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -1577,10 +1576,7 @@ public class BlockManager {
     }
 
     // Log the block report processing stats from Namenode perspective
-    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
-    if (metrics != null) {
-      metrics.addBlockReport((int) (endTime - startTime));
-    }
+    NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
     blockLog.info("BLOCK* processReport: from " + nodeID + ", blocks: "
         + newReport.getNumberOfBlocks() + ", processing time: "
         + (endTime - startTime) + " msecs");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 56ac12f5ffe..a398a3fb0fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -551,7 +551,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
   @Override
   public void updateRegInfo(DatanodeID nodeReg) {
     super.updateRegInfo(nodeReg);
-    firstBlockReport = true; // must re-process IBR after re-registration
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 589510dc9a7..435a26ddc72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -406,7 +406,7 @@ public class DatanodeManager {
   }
 
   /** Add a datanode. */
-  void addDatanode(final DatanodeDescriptor node) {
+  private void addDatanode(final DatanodeDescriptor node) {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index eabbda064e7..b9811e7e5e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -34,16 +34,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import static org.mockito.Mockito.*;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
@@ -488,70 +485,4 @@ public class TestBlockManager {
         new NumberReplicas(),
         UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
   }
-
-  @Test
-  public void testSafeModeIBR() throws Exception {
-    DatanodeDescriptor node = spy(nodes.get(0));
-    node.setStorageID("dummy-storage");
-    node.isAlive = true;
-
-    DatanodeRegistration nodeReg =
-        new DatanodeRegistration(node, null, null, "");
-
-    // pretend to be in safemode
-    doReturn(true).when(fsn).isInStartupSafeMode();
-
-    // register new node
-    bm.getDatanodeManager().registerDatanode(nodeReg);
-    bm.getDatanodeManager().addDatanode(node); // swap in spy
-    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
-    assertTrue(node.isFirstBlockReport());
-    // send block report, should be processed
-    reset(node);
-    bm.processReport(node, "pool", new BlockListAsLongs());
-    verify(node).receivedBlockReport();
-    assertFalse(node.isFirstBlockReport());
-    // send block report again, should NOT be processed
-    reset(node);
-    bm.processReport(node, "pool", new BlockListAsLongs());
-    verify(node, never()).receivedBlockReport();
-    assertFalse(node.isFirstBlockReport());
-
-    // re-register as if node restarted, should update existing node
-    bm.getDatanodeManager().removeDatanode(node);
-    reset(node);
-    bm.getDatanodeManager().registerDatanode(nodeReg);
-    verify(node).updateRegInfo(nodeReg);
-    assertTrue(node.isFirstBlockReport()); // ready for report again
-    // send block report, should be processed after restart
-    reset(node);
-    bm.processReport(node, "pool", new BlockListAsLongs());
-    verify(node).receivedBlockReport();
-    assertFalse(node.isFirstBlockReport());
-  }
-
-  @Test
-  public void testSafeModeIBRAfterIncremental() throws Exception {
-    DatanodeDescriptor node = spy(nodes.get(0));
-    node.setStorageID("dummy-storage");
-    node.isAlive = true;
-
-    DatanodeRegistration nodeReg =
-        new DatanodeRegistration(node, null, null, "");
-
-    // pretend to be in safemode
-    doReturn(true).when(fsn).isInStartupSafeMode();
-
-    // register new node
-    bm.getDatanodeManager().registerDatanode(nodeReg);
-    bm.getDatanodeManager().addDatanode(node); // swap in spy
-    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
-    assertTrue(node.isFirstBlockReport());
-    // send block report while pretending to already have blocks
-    reset(node);
-    doReturn(1).when(node).numBlocks();
-    bm.processReport(node, "pool", new BlockListAsLongs());
-    verify(node).receivedBlockReport();
-    assertFalse(node.isFirstBlockReport());
-  }
 }