diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bfc5504f5a..1d733a0b63b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -83,6 +83,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7997. The first non-existing xattr should also throw IOException.
     (zhouyingchao via yliu)
 
+    HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+    decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 11965c150aa..acb5c4459b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3354,8 +3354,7 @@ public class BlockManager {
     // file already removes them from the block map below.
     block.setNumBytes(BlockCommand.NO_ACK);
     addToInvalidates(block);
-    corruptReplicas.removeFromCorruptReplicasMap(block);
-    blocksMap.removeBlock(block);
+    removeBlockFromMap(block);
     // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
     neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3531,11 +3530,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+    removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
     corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+    for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+      String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+      LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid);
+      if (excessReplicas != null) {
+        if (excessReplicas.remove(block)) {
+          excessBlocksCount.decrementAndGet();
+          if (excessReplicas.isEmpty()) {
+            excessReplicateMap.remove(uuid);
+          }
+        }
+      }
+    }
+  }
+
   public int getCapacity() {
     return blocksMap.getCapacity();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 2ba609ddb1b..438c2d7746e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -273,12 +273,17 @@ public class TestNameNodeMetrics {
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
     updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
-    assertGauge("ExcessBlocks", totalBlocks, rb);
+    assertGauge("ExcessBlocks", 1L, rb);
+
+    // verify ExcessBlocks metric is decremented and
+    // excessReplicateMap is cleared after deleting a file
     fs.delete(file, true);
+    rb = getMetrics(NS_METRICS);
+    assertGauge("ExcessBlocks", 0L, rb);
+    assertTrue(bm.excessReplicateMap.isEmpty());
   }
 
   /** Test to ensure metrics reflects missing blocks */