HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo instead of Block. Contributed by Mingliang Liu.

(cherry picked from commit 830eb252aa)
Jing Zhao 2015-12-01 13:05:22 -08:00
parent 9db4b77ea5
commit 7e416aa70d
3 changed files with 6 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -838,6 +838,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9269. Update the documentation and wrapper for fuse-dfs.
     (Wei-Chiu Chuang via zhz)
 
+    HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
+    instead of Block. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -3569,7 +3569,7 @@ public class BlockManager implements BlockStatsMXBean {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  public void removeBlockFromMap(Block block) {
+  public void removeBlockFromMap(BlockInfo block) {
     removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
@@ -3579,7 +3579,7 @@ public class BlockManager implements BlockStatsMXBean {
   /**
    * If a block is removed from blocksMap, remove it from excessReplicateMap.
   */
-  private void removeFromExcessReplicateMap(Block block) {
+  private void removeFromExcessReplicateMap(BlockInfo block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
       LightWeightHashSet<Block> excessReplicas =
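
The change is a parameter-type narrowing: in HDFS, BlockInfo extends Block and carries namesystem-side state such as which storages hold the replicas, and both removeBlockFromMap and removeFromExcessReplicateMap are only ever handed blocks the BlockManager already tracks as BlockInfo. A minimal, self-contained sketch of the pattern is shown below; Block, BlockInfo, and ToyBlockManager here are simplified stand-ins, not the real org.apache.hadoop.hdfs classes.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class Block {
  final long blockId;
  Block(long id) { this.blockId = id; }
}

// Stand-in for the HDFS BlockInfo subclass, which additionally knows
// which datanode storages hold replicas of the block.
class BlockInfo extends Block {
  final Set<String> datanodeUuids = new HashSet<>();
  BlockInfo(long id) { super(id); }
}

class ToyBlockManager {
  // Excess replicas tracked per datanode UUID, loosely mirroring
  // BlockManager#excessReplicateMap.
  private final Map<String, Set<Block>> excessReplicateMap = new HashMap<>();

  // Before this commit the parameter was the broader type Block; narrowing
  // it to BlockInfo states the real precondition (the block is one the
  // manager tracks) and lets the compiler reject bare Block arguments.
  void removeFromExcessReplicateMap(BlockInfo block) {
    for (String uuid : block.datanodeUuids) {
      Set<Block> excess = excessReplicateMap.get(uuid);
      if (excess != null) {
        excess.remove(block);
      }
    }
  }
}

Because BlockInfo is a subclass of Block, callers that already hold a BlockInfo compile unchanged; only call sites that passed a bare Block, such as the one in FSDirWriteFileOp below, need to be adjusted.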

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -79,7 +79,7 @@ class FSDirWriteFileOp {
     if (uc == null) {
       return false;
     }
-    fsd.getBlockManager().removeBlockFromMap(block);
+    fsd.getBlockManager().removeBlockFromMap(uc);
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
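
On the caller side the fix is to hand over the BlockInfo the surrounding code already has (here the local variable uc, guarded by the preceding null check) rather than the bare Block it was derived from. In terms of the stand-in classes sketched above, the call-site pattern is roughly:

class ToyCaller {
  static boolean removeBlock(ToyBlockManager bm, BlockInfo uc) {
    if (uc == null) {
      return false;   // nothing tracked for this block
    }
    // Pass the BlockInfo itself; no extra lookup or cast from Block needed.
    bm.removeFromExcessReplicateMap(uc);
    return true;
  }
}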