HDFS-6945. BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed. (aajisaka)

(cherry picked from commit 18a91fe4df)
Akira Ajisaka 2015-04-01 09:07:28 +09:00
parent 4cf44bef5c
commit b85bbca745
3 changed files with 30 additions and 4 deletions
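Before this change, BlockManager#removeBlock() cleared blocksMap and corruptReplicasMap but left the block's entries in excessReplicateMap, so the ExcessBlocks gauge never came back down after the block was deleted. The following standalone sketch (not HDFS code) illustrates the bookkeeping invariant the patch restores: the per-datanode excess sets and the counter must be updated together when a block disappears. The types are simplified stand-ins (String UUIDs and long block IDs in place of DatanodeDescriptor and Block); only the field names excessReplicateMap and excessBlocksCount mirror BlockManager. Unlike the real fix, which visits only the storages returned by blocksMap.getStorages(block), the sketch simply scans every entry.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

/** Minimal sketch of the excess-replica bookkeeping this commit fixes. */
public class ExcessReplicaBookkeeping {
  // datanode UUID -> blocks that node holds in excess of the replication factor
  private final Map<String, Set<Long>> excessReplicateMap = new HashMap<>();
  private final AtomicLong excessBlocksCount = new AtomicLong();

  /** Record that a datanode holds an excess replica of a block. */
  public void addExcess(String datanodeUuid, long blockId) {
    Set<Long> set =
        excessReplicateMap.computeIfAbsent(datanodeUuid, k -> new HashSet<>());
    if (set.add(blockId)) {
      excessBlocksCount.incrementAndGet();
    }
  }

  /**
   * Remove a block from every datanode's excess set, decrementing the counter
   * once per entry actually removed -- the step the original removeBlock() skipped.
   */
  public void removeBlock(long blockId) {
    Iterator<Map.Entry<String, Set<Long>>> it =
        excessReplicateMap.entrySet().iterator();
    while (it.hasNext()) {
      Set<Long> set = it.next().getValue();
      if (set.remove(blockId)) {
        excessBlocksCount.decrementAndGet();
        if (set.isEmpty()) {
          it.remove();  // drop empty per-datanode sets, as the new code does
        }
      }
    }
  }

  public long getExcessBlocksCount() {
    return excessBlocksCount.get();
  }
}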

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -83,6 +83,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7997. The first non-existing xattr should also throw IOException.
     (zhouyingchao via yliu)
 
+    HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+    decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -3354,8 +3354,7 @@ public void removeBlock(Block block) {
     // file already removes them from the block map below.
     block.setNumBytes(BlockCommand.NO_ACK);
     addToInvalidates(block);
-    corruptReplicas.removeFromCorruptReplicasMap(block);
-    blocksMap.removeBlock(block);
+    removeBlockFromMap(block);
     // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
     neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3531,11 +3530,30 @@ public int numCorruptReplicas(Block block) {
   }
 
   public void removeBlockFromMap(Block block) {
+    removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
     corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+    for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+      String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+      LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid);
+      if (excessReplicas != null) {
+        if (excessReplicas.remove(block)) {
+          excessBlocksCount.decrementAndGet();
+          if (excessReplicas.isEmpty()) {
+            excessReplicateMap.remove(uuid);
+          }
+        }
+      }
+    }
+  }
+
   public int getCapacity() {
     return blocksMap.getCapacity();
   }
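A quick check of the sketch given earlier, mirroring what the new test assertions below verify (the gauge returns to 0 and the map empties once the block is gone). The class and UUID string are hypothetical, introduced only for this demo.

public class ExcessReplicaBookkeepingDemo {
  public static void main(String[] args) {
    ExcessReplicaBookkeeping bm = new ExcessReplicaBookkeeping();

    // Dropping a file's replication factor from 2 to 1 leaves one excess replica.
    bm.addExcess("datanode-uuid-1", 42L);
    System.out.println(bm.getExcessBlocksCount());  // prints 1

    // Deleting the file removes the block; the counter must fall back to 0.
    bm.removeBlock(42L);
    System.out.println(bm.getExcessBlocksCount());  // prints 0
  }
}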

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -273,12 +273,17 @@ public void testCorruptBlock() throws Exception {
   public void testExcessBlocks() throws Exception {
     Path file = getTestPath("testExcessBlocks");
     createFile(file, 100, (short)2);
-    long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
     updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
-    assertGauge("ExcessBlocks", totalBlocks, rb);
+    assertGauge("ExcessBlocks", 1L, rb);
+
+    // verify ExcessBlocks metric is decremented and
+    // excessReplicateMap is cleared after deleting a file
     fs.delete(file, true);
+    rb = getMetrics(NS_METRICS);
+    assertGauge("ExcessBlocks", 0L, rb);
+    assertTrue(bm.excessReplicateMap.isEmpty());
   }
 
   /** Test to ensure metrics reflects missing blocks */