From a8f209dee781259ef053c8d3c882d8b57799cef8 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Wed, 10 May 2017 14:22:17 -0500
Subject: [PATCH] HDFS-11755. Underconstruction blocks can be considered
 missing. Contributed by Nathan Roberts.

---
 .../server/blockmanagement/BlockManager.java |  2 +-
 .../hdfs/server/namenode/CacheManager.java   |  3 +-
 .../hadoop/hdfs/TestFileCorruption.java      |  1 +
 .../blockmanagement/TestBlockManager.java    | 72 +++++++++++++++++--
 4 files changed, 68 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6ff8a62b116..7ade4eadd10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3514,7 +3514,7 @@ public class BlockManager implements BlockStatsMXBean {
       final int curReplicasDelta, int expectedReplicasDelta) {
     namesystem.writeLock();
     try {
-      if (!isPopulatingReplQueues()) {
+      if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
       }
       NumberReplicas repl = countNodes(block);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 12e64f62032..01f70b6d477 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -100,7 +100,7 @@ import com.google.common.collect.Lists;
  * caching directives, we will schedule caching and uncaching work.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class CacheManager {
+public class CacheManager {
   public static final Logger LOG = LoggerFactory.getLogger(CacheManager.class);
 
   private static final float MIN_CACHED_BLOCKS_PERCENT = 0.001f;
@@ -188,7 +188,6 @@ public final class CacheManager {
       this.directives = directives;
     }
   }
-
   CacheManager(FSNamesystem namesystem, Configuration conf,
       BlockManager blockManager) {
     this.namesystem = namesystem;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 5477700fa07..59ba376c558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -274,6 +274,7 @@ public class TestFileCorruption {
     out.write(outBuffer, 0, bufferSize);
     out.close();
     dfs.setReplication(filePath, (short) 10);
+    cluster.triggerBlockReports();
     // underreplicated Blocks should be one after setrep
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f4de9685cf9..b846658c928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -78,11 +78,15 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
+import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -97,6 +101,8 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.util.GSet;
+import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Assert;
@@ -141,6 +147,19 @@ public class TestBlockManager {
     Mockito.doReturn(true).when(fsn).hasReadLock();
     Mockito.doReturn(true).when(fsn).isRunning();
     bm = new BlockManager(fsn, conf);
+    // Make shouldPopulateReplQueues return true
+    HAContext haContext = Mockito.mock(HAContext.class);
+    HAState haState = Mockito.mock(HAState.class);
+    Mockito.when(haContext.getState()).thenReturn(haState);
+    Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
+    Mockito.when(fsn.getHAContext()).thenReturn(haContext);
+    bm.setInitializedReplQueues(true);
+    CacheManager cm = Mockito.mock(CacheManager.class);
+    Mockito.doReturn(cm).when(fsn).getCacheManager();
+    GSet<CachedBlock, CachedBlock> cb =
+        new LightWeightGSet<CachedBlock, CachedBlock>(1);
+    Mockito.when(cm.getCachedBlocks()).thenReturn(cb);
+
     final String[] racks = {
         "/rackA",
         "/rackA",
@@ -515,7 +534,7 @@ public class TestBlockManager {
     }
     return ret;
   }
-  
+
   private List<DatanodeDescriptor> startDecommission(int ... indexes) {
     List<DatanodeDescriptor> nodes = getNodes(indexes);
     for (DatanodeDescriptor node : nodes) {
@@ -848,6 +867,42 @@ public class TestBlockManager {
         (ds) >= 0);
   }
 
+  @Test
+  public void testUCBlockNotConsideredMissing() throws Exception {
+    DatanodeDescriptor node = nodes.get(0);
+    DatanodeStorageInfo ds = node.getStorageInfos()[0];
+    node.setAlive(true);
+    DatanodeRegistration nodeReg =
+        new DatanodeRegistration(node, null, null, "");
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node);
+
+    // Build an incremental report
+    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
+
+    // blk_42 is under construction, finalizes on one node and is
+    // immediately deleted on same node
+    long blockId = 42; // arbitrary
+    BlockInfo receivedBlock = addUcBlockToBM(blockId);
+
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+    rdbiList.add(new ReceivedDeletedBlockInfo(
+        new Block(blockId),
+        ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
+
+    // process IBR
+    StorageReceivedDeletedBlocks srdb =
+        new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
+            rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
+    bm.processIncrementalBlockReport(node, srdb);
+    // Needed replications should still be 0.
+    assertEquals("UC block was incorrectly added to needed Replications",
+        0, bm.neededReplications.size());
+  }
+
   private BlockInfo addBlockToBM(long blkId) {
     Block block = new Block(blkId);
     BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
@@ -1180,14 +1235,17 @@ public class TestBlockManager {
     FileInputStream fstream = new FileInputStream(file);
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+    String corruptBlocksLine;
+    Boolean foundIt = false;
     try {
-      for(int i =0;i<6;i++) {
-        reader.readLine();
+      while ((corruptBlocksLine = reader.readLine()) != null) {
+        if (corruptBlocksLine.compareTo("Corrupt Blocks:") == 0) {
+          foundIt = true;
+          break;
+        }
       }
-      String corruptBlocksLine = reader.readLine();
-      assertEquals("Unexpected text in metasave," +
-          "was expecting corrupt blocks section!", 0,
-          corruptBlocksLine.compareTo("Corrupt Blocks:"));
+      assertTrue("Unexpected text in metasave," +
+          "was expecting corrupt blocks section!", foundIt);
       corruptBlocksLine = reader.readLine();
       String regex = "Block=[0-9]+\\tNode=.*\\tStorageID=.*StorageState.*" +
           "TotalReplicas=.*Reason=GENSTAMP_MISMATCH";
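
Note: the core of this fix is the early return added in BlockManager.java, which now
skips blocks that are not yet COMPLETE when updating the needed-replications queue.
The standalone Java sketch below illustrates just that guard under simplified,
hypothetical types (BlockUCState, BlockStub, and shouldUpdateNeededReplications are
illustrative names for this sketch, not HDFS APIs):

// Minimal sketch of the HDFS-11755 guard: replication accounting must ignore
// blocks that are still under construction, because an incremental block
// report that finalizes and then immediately deletes a replica of such a
// block would otherwise make the block look missing.
public class ReplQueueGuardSketch {
  enum BlockUCState { UNDER_CONSTRUCTION, COMPLETE }

  /** Hypothetical stand-in for BlockInfo; only isComplete() matters here. */
  static class BlockStub {
    private final BlockUCState state;
    BlockStub(BlockUCState state) { this.state = state; }
    boolean isComplete() { return state == BlockUCState.COMPLETE; }
  }

  // Mirrors the fixed condition: before the patch only the first operand was
  // checked, so an under-construction block could be queued as missing.
  static boolean shouldUpdateNeededReplications(boolean populatingReplQueues,
      BlockStub block) {
    return populatingReplQueues && block.isComplete();
  }

  public static void main(String[] args) {
    BlockStub uc = new BlockStub(BlockUCState.UNDER_CONSTRUCTION);
    BlockStub complete = new BlockStub(BlockUCState.COMPLETE);
    // The UC block is skipped; the complete block is processed as before.
    System.out.println(shouldUpdateNeededReplications(true, uc));       // false
    System.out.println(shouldUpdateNeededReplications(true, complete)); // true
  }
}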