From 972782d9568e0849484c027f27c1638ba50ec56e Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Fri, 12 Feb 2016 11:07:52 -0800
Subject: [PATCH] HDFS-9754. Avoid unnecessary getBlockCollection calls in
 BlockManager. Contributed by Jing Zhao.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../server/blockmanagement/BlockInfo.java     | 10 +++
 .../server/blockmanagement/BlockManager.java  | 86 +++++++------
 .../server/blockmanagement/BlocksMap.java     |  2 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java |  8 +-
 .../hdfs/server/namenode/FSNamesystem.java    | 14 +--
 .../hadoop/hdfs/server/namenode/INode.java    |  6 +-
 .../hdfs/server/namenode/INodeFile.java       | 38 ++++----
 .../hdfs/server/namenode/Namesystem.java      | 23 -----
 .../snapshot/FileWithSnapshotFeature.java     |  2 +-
 .../TestNameNodeMetadataConsistency.java      |  2 +
 11 files changed, 76 insertions(+), 118 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 687e35f6c7e..25c8af07fd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1002,6 +1002,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9780. RollingFileSystemSink doesn't work on secure clusters. (Daniel
     Templeton via kasha)
 
+    HDFS-9754. Avoid unnecessary getBlockCollection calls in BlockManager.
+    (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 5da2140a5bf..104a2734fe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -96,6 +96,10 @@ public abstract class BlockInfo extends Block
     this.bcId = id;
   }
 
+  public void delete() {
+    setBlockCollectionId(INVALID_INODE_ID);
+  }
+
   public boolean isDeleted() {
     return bcId == INVALID_INODE_ID;
   }
@@ -245,6 +249,12 @@ public abstract class BlockInfo extends Block
     return getBlockUCState().equals(BlockUCState.COMPLETE);
   }
 
+  public final boolean isCompleteOrCommitted() {
+    final BlockUCState state = getBlockUCState();
+    return state.equals(BlockUCState.COMPLETE) ||
+        state.equals(BlockUCState.COMMITTED);
+  }
+
   /**
    * Add/Update the under construction feature.
    */
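The two helpers added to BlockInfo above let the rest of the block management code ask "is this block deleted?" and "is this block complete or committed?" without first resolving the owning BlockCollection (the file inode). As a rough illustration of the lifecycle: a minimal, runnable sketch with a simplified stand-in class, not the actual Hadoop type (INVALID_INODE_ID really lives in INodeId and is inlined here for self-containment):

    // Illustrative stand-in for the BlockInfo pattern above.
    class SketchBlockInfo {
      static final long INVALID_INODE_ID = -1;  // inlined for this sketch
      private long bcId;                        // id of the owning file inode

      SketchBlockInfo(long bcId) { this.bcId = bcId; }

      // Detach the block from its file, as BlockInfo#delete() now does.
      void delete() { bcId = INVALID_INODE_ID; }

      // A deleted block is exactly one with no owning block collection.
      boolean isDeleted() { return bcId == INVALID_INODE_ID; }

      public static void main(String[] args) {
        SketchBlockInfo blk = new SketchBlockInfo(16386L);
        System.out.println(blk.isDeleted()); // false: owned by inode 16386
        blk.delete();                        // file is gone; detach the block
        System.out.println(blk.isDeleted()); // true: BlockManager can skip it
      }
    }

With delete() marking a block at collection time, later consumers can rely on isDeleted() instead of a blocksMap-to-inode lookup, which is the point of this change.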
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9e8026b4f9d..67f50263109 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -743,7 +743,7 @@ public class BlockManager implements BlockStatsMXBean {
     }
     if (hasMinStorage(lastBlock)) {
       if (committed) {
-        addExpectedReplicasToPending(lastBlock, bc);
+        addExpectedReplicasToPending(lastBlock);
       }
       completeBlock(lastBlock, false);
     }
@@ -755,26 +755,21 @@ public class BlockManager implements BlockStatsMXBean {
    * pendingReplications in order to keep ReplicationMonitor from scheduling
    * the block.
    */
-  public void addExpectedReplicasToPending(BlockInfo blk, BlockCollection bc) {
-    if (!bc.isStriped()) {
-      addExpectedReplicasToPending(blk);
-    }
-  }
-
-  private void addExpectedReplicasToPending(BlockInfo lastBlock) {
-    DatanodeStorageInfo[] expectedStorages =
-        lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
-    if (expectedStorages.length - lastBlock.numNodes() > 0) {
-      ArrayList<DatanodeDescriptor> pendingNodes =
-          new ArrayList<DatanodeDescriptor>();
-      for (DatanodeStorageInfo storage : expectedStorages) {
-        DatanodeDescriptor dnd = storage.getDatanodeDescriptor();
-        if (lastBlock.findStorageInfo(dnd) == null) {
-          pendingNodes.add(dnd);
+  public void addExpectedReplicasToPending(BlockInfo blk) {
+    if (!blk.isStriped()) {
+      DatanodeStorageInfo[] expectedStorages =
+          blk.getUnderConstructionFeature().getExpectedStorageLocations();
+      if (expectedStorages.length - blk.numNodes() > 0) {
+        ArrayList<DatanodeDescriptor> pendingNodes = new ArrayList<>();
+        for (DatanodeStorageInfo storage : expectedStorages) {
+          DatanodeDescriptor dnd = storage.getDatanodeDescriptor();
+          if (blk.findStorageInfo(dnd) == null) {
+            pendingNodes.add(dnd);
+          }
         }
+        pendingReplications.increment(blk,
+            pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
       }
-      pendingReplications.increment(lastBlock,
-          pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
     }
   }
 
@@ -962,13 +957,13 @@ public class BlockManager implements BlockStatsMXBean {
     final BlockUnderConstructionFeature uc = blk.getUnderConstructionFeature();
     if (blk.isStriped()) {
       final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
-      final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(),
+      final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
           blk);
       return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos,
           false);
     } else {
       final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
-      final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(),
+      final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
           blk);
       return newLocatedBlock(eb, storages, pos, false);
     }
@@ -1011,7 +1006,7 @@ public class BlockManager implements BlockStatsMXBean {
         " numNodes: " + numNodes +
         " numCorrupt: " + numCorruptNodes +
         " numCorruptRepls: " + numCorruptReplicas;
-    final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
+    final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk);
     return blockIndices == null ?
         newLocatedBlock(eb, machines, pos, isCorrupt) :
         newLocatedStripedBlock(eb, machines, blockIndices, pos, isCorrupt);
@@ -1578,11 +1573,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
-    // block should belong to a file
-    BlockCollection bc = getBlockCollection(block);
-    // abandoned block or block reopened for append
-    if (bc == null
-        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
+    // skip abandoned block or block reopened for append
+    if (block.isDeleted() || !block.isCompleteOrCommitted()) {
       // remove from neededReplications
       neededReplications.remove(block, priority);
       return null;
@@ -1626,6 +1618,7 @@ public class BlockManager implements BlockStatsMXBean {
       additionalReplRequired = 1; // Needed on a new rack
     }
 
+    final BlockCollection bc = getBlockCollection(block);
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
@@ -1649,11 +1642,8 @@ public class BlockManager implements BlockStatsMXBean {
     BlockInfo block = rw.getBlock();
     int priority = rw.getPriority();
     // Recheck since global lock was released
-    // block should belong to a file
-    BlockCollection bc = getBlockCollection(block);
-    // abandoned block or block reopened for append
-    if (bc == null
-        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
+    // skip abandoned block or block reopened for append
+    if (block.isDeleted() || !block.isCompleteOrCommitted()) {
       neededReplications.remove(block, priority);
       rw.resetTargets();
       return false;
@@ -1688,23 +1678,12 @@ public class BlockManager implements BlockStatsMXBean {
       assert rw.getTargets().length > 0;
       assert pendingNum == 0 : "Should wait the previous reconstruction"
           + " to finish";
-      String src = getBlockCollection(block).getName();
-      ErasureCodingPolicy ecPolicy = null;
-      try {
-        ecPolicy = namesystem.getErasureCodingPolicyForPath(src);
-      } catch (IOException e) {
-        blockLog
-            .warn("Failed to get EC policy for the file {} ", src);
-      }
-      if (ecPolicy == null) {
-        blockLog.warn("No erasure coding policy found for the file {}. "
-            + "So cannot proceed for reconstruction", src);
-        // TODO: we may have to revisit later for what we can do better to
-        // handle this case.
-        return false;
-      }
+      final ErasureCodingPolicy ecPolicy =
+          ((BlockInfoStriped) block).getErasureCodingPolicy();
+      assert ecPolicy != null;
+
       rw.getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
-          new ExtendedBlock(namesystem.getBlockPoolId(), block),
+          new ExtendedBlock(getBlockPoolId(), block),
           rw.getSrcNodes(), rw.getTargets(),
           ((ErasureCodingWork) rw).getLiveBlockIndicies(), ecPolicy);
     } else {
@@ -2870,8 +2849,6 @@ public class BlockManager implements BlockStatsMXBean {
       // it will happen in next block report otherwise.
       return block;
     }
-    BlockCollection bc = getBlockCollection(storedBlock);
-    assert bc != null : "Block must belong to a file";
 
     // add block to the datanode
     AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock);
@@ -2907,7 +2884,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
         hasMinStorage(storedBlock, numLiveReplicas)) {
-      addExpectedReplicasToPending(storedBlock, bc);
+      addExpectedReplicasToPending(storedBlock);
       completeBlock(storedBlock, false);
     } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
       // check whether safe replication is reached for the block
@@ -2918,8 +2895,8 @@ public class BlockManager implements BlockStatsMXBean {
       bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
     }
 
-    // if file is under construction, then done for now
-    if (bc.isUnderConstruction()) {
+    // if block is still under construction, then done for now
+    if (!storedBlock.isCompleteOrCommitted()) {
       return storedBlock;
     }
 
@@ -3444,8 +3421,7 @@ public class BlockManager implements BlockStatsMXBean {
       // necessary. In that case, put block on a possibly-will-
      // be-replicated list.
       //
-      BlockCollection bc = getBlockCollection(storedBlock);
-      if (bc != null) {
+      if (!storedBlock.isDeleted()) {
         bmSafeMode.decrementSafeBlockCount(storedBlock);
         updateNeededReplications(storedBlock, -1, 0);
       }
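The BlockManager hunks above all follow one pattern: decisions that used to require resolving the BlockCollection (getBlockCollection(block), then bc == null or bc.isUnderConstruction() checks) are re-expressed purely in terms of the block's own state. A sketch of the equivalence behind the new reconstruction-scheduling guard, in plain Java with stand-in types (illustrative only, not the Hadoop classes):

    // The guard logic, lifted out of scheduleReconstruction for illustration.
    enum UCState { COMPLETE, COMMITTED, UNDER_CONSTRUCTION }

    class GuardSketch {
      // "bc == null" becomes isDeleted(); "last block of a file under
      // construction" becomes "neither COMPLETE nor COMMITTED".
      static boolean skipReconstruction(boolean deleted, UCState state) {
        boolean completeOrCommitted =
            state == UCState.COMPLETE || state == UCState.COMMITTED;
        return deleted || !completeOrCommitted;
      }

      public static void main(String[] args) {
        System.out.println(skipReconstruction(false, UCState.COMPLETE));           // false: schedule work
        System.out.println(skipReconstruction(true,  UCState.COMPLETE));           // true: abandoned block
        System.out.println(skipReconstruction(false, UCState.UNDER_CONSTRUCTION)); // true: reopened for append
      }
    }

Note that the striped path also changes strategy: instead of resolving the file path and asking the namesystem for its erasure coding policy (a lookup that could fail and needed warn-and-bail handling), the policy is read directly off the BlockInfoStriped instance, which carries it by construction, so the null case becomes an assert.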
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 71d0598d9ad..f7cde90872f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -91,7 +91,7 @@ class BlocksMap {
     if (blockInfo == null)
       return;
 
-    blockInfo.setBlockCollectionId(INodeId.INVALID_INODE_ID);
+    assert blockInfo.getBlockCollectionId() == INodeId.INVALID_INODE_ID;
     final int size = blockInfo.isStriped() ?
         blockInfo.getCapacity() : blockInfo.numNodes();
     for(int idx = size - 1; idx >= 0; idx--) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 95980e76129..29c663a3e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -186,6 +187,7 @@ final class FSDirTruncateOp {
             "Should be the same block.";
         if (oldBlock.getBlockId() != tBlk.getBlockId()
             && !file.isBlockInLatestSnapshot(oldBlock)) {
+          oldBlock.delete();
           fsd.getBlockManager().removeBlockFromMap(oldBlock);
         }
       }
@@ -298,9 +300,9 @@ final class FSDirTruncateOp {
 
     verifyQuotaForTruncate(fsn, iip, file, newLength, delta);
 
-    long remainingLength =
-        file.collectBlocksBeyondMax(newLength, collectedBlocks);
-    file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
+    Set<BlockInfo> toRetain = file.getSnapshotBlocksToRetain(latestSnapshot);
+    long remainingLength = file.collectBlocksBeyondMax(newLength,
+        collectedBlocks, toRetain);
     file.setModificationTime(mtime);
     // return whether on a block boundary
     return (remainingLength - newLength) == 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c3a0058b4ec..4722ac1a354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3199,7 +3199,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       final BlockInfo b = blocks[i];
       if (b != null && b.getBlockUCState() == BlockUCState.COMMITTED) {
         // b is COMMITTED but not yet COMPLETE, add it to pending replication.
-        blockManager.addExpectedReplicasToPending(b, pendingFile);
+        blockManager.addExpectedReplicasToPending(b);
       }
     }
   }
@@ -4308,9 +4308,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return new PermissionStatus(fsOwner.getShortUserName(), supergroup,
         permission);
   }
 
-  @Override
-  public void checkSuperuserPrivilege()
-      throws AccessControlException {
+  void checkSuperuserPrivilege() throws AccessControlException {
     if (isPermissionEnabled) {
       FSPermissionChecker pc = getPermissionChecker();
       pc.checkSuperuserPrivilege();
@@ -6573,7 +6571,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return getErasureCodingPolicyForPath(src);
+      return FSDirErasureCodingOp.getErasureCodingPolicy(this, src);
     } finally {
       readUnlock();
     }
@@ -6837,12 +6835,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
-  @Override
-  public ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
-      throws IOException {
-    return FSDirErasureCodingOp.getErasureCodingPolicy(this, src);
-  }
-
   /**
    * Gets number of bytes in the blocks in future generation stamps.
   *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 9d04fbba810..c8f36e137cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -1011,14 +1011,10 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
      */
     public void addDeleteBlock(BlockInfo toDelete) {
       assert toDelete != null : "toDelete is null";
+      toDelete.delete();
       toDeleteList.add(toDelete);
     }
 
-    public void removeDeleteBlock(BlockInfo block) {
-      assert block != null : "block is null";
-      toDeleteList.remove(block);
-    }
-
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {
       toUpdateReplicationInfo.add(
           new UpdatedReplicationInfo(targetRepl, block));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 4e44b0b2a82..53684758db3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -314,12 +315,13 @@ public class INodeFile extends INodeWithAdditionalFields
       return null;
     }
 
-    BlockInfo ucBlock = blocks[size_1];
+    BlockInfo lastBlock = blocks[size_1];
     //copy to a new list
     BlockInfo[] newlist = new BlockInfo[size_1];
     System.arraycopy(blocks, 0, newlist, 0, size_1);
     setBlocks(newlist);
-    return ucBlock;
+    lastBlock.delete();
+    return lastBlock;
   }
 
   /* End of Under-Construction Feature */
@@ -629,7 +631,6 @@ public class INodeFile extends INodeWithAdditionalFields
     if (blocks != null && reclaimContext.collectedBlocks != null) {
       for (BlockInfo blk : blocks) {
         reclaimContext.collectedBlocks.addDeleteBlock(blk);
-        blk.setBlockCollectionId(INodeId.INVALID_INODE_ID);
       }
     }
     clearBlocks();
@@ -905,7 +906,7 @@ public class INodeFile extends INodeWithAdditionalFields
    * @return sum of sizes of the remained blocks
    */
   public long collectBlocksBeyondMax(final long max,
-      final BlocksMapUpdateInfo collectedBlocks) {
+      final BlocksMapUpdateInfo collectedBlocks, Set<BlockInfo> toRetain) {
     final BlockInfo[] oldBlocks = getBlocks();
     if (oldBlocks == null) {
       return 0;
@@ -927,7 +928,10 @@ public class INodeFile extends INodeWithAdditionalFields
     // collect the blocks beyond max
     if (collectedBlocks != null) {
       for(; n < oldBlocks.length; n++) {
-        collectedBlocks.addDeleteBlock(oldBlocks[n]);
+        final BlockInfo del = oldBlocks[n];
+        if (toRetain == null || !toRetain.contains(del)) {
+          collectedBlocks.addDeleteBlock(del);
+        }
       }
     }
     return size;
@@ -1026,22 +1030,18 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Exclude blocks collected for deletion that belong to a snapshot. */
-  void excludeSnapshotBlocks(int snapshotId,
-      BlocksMapUpdateInfo collectedBlocks) {
-    if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
-      return;
+  Set<BlockInfo> getSnapshotBlocksToRetain(int snapshotId) {
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
-    if(sf == null)
-      return;
-    BlockInfo[] snapshotBlocks =
-        getDiffs().findEarlierSnapshotBlocks(snapshotId);
-    if(snapshotBlocks == null)
-      return;
-    List<BlockInfo> toDelete = collectedBlocks.getToDeleteList();
-    for(BlockInfo blk : snapshotBlocks) {
-      if(toDelete.contains(blk))
-        collectedBlocks.removeDeleteBlock(blk);
+    if(sf == null) {
+      return null;
     }
+    BlockInfo[] snapshotBlocks = getDiffs().findEarlierSnapshotBlocks(snapshotId);
+    if(snapshotBlocks == null) {
+      return null;
+    }
+    Set<BlockInfo> toRetain = new HashSet<>(snapshotBlocks.length);
+    Collections.addAll(toRetain, snapshotBlocks);
+    return toRetain;
   }
 
   /**
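Taken together, INode.BlocksMapUpdateInfo#addDeleteBlock calling toDelete.delete() and the BlocksMap change earlier in this patch establish a single invariant: a block is marked deleted the moment it is queued for removal, and BlocksMap#removeBlock merely asserts that this has already happened. A runnable sketch of that contract, with simplified stand-ins rather than the Hadoop classes:

    import java.util.ArrayList;
    import java.util.List;

    class InvariantSketch {
      static final long INVALID_INODE_ID = -1;

      static class Blk {
        long bcId = 42;                      // owned by some file inode
        void delete() { bcId = INVALID_INODE_ID; }
      }

      static final List<Blk> toDeleteList = new ArrayList<>();

      static void addDeleteBlock(Blk toDelete) {
        assert toDelete != null : "toDelete is null";
        toDelete.delete();                   // new: detach at collection time
        toDeleteList.add(toDelete);
      }

      static void removeBlock(Blk blockInfo) {
        // new: removal assumes deletion already happened upstream
        assert blockInfo.bcId == INVALID_INODE_ID;
        // ... unlink from storages and drop from the map ...
      }

      public static void main(String[] args) {
        Blk b = new Blk();
        addDeleteBlock(b);
        removeBlock(b);                      // assertion holds
      }
    }

This is also why removeDeleteBlock could be dropped: nothing is taken back off the to-delete list anymore, so exclusions must happen before a block is ever added, via the toRetain filtering shown above.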
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 5a9e69bb8e7..c6751443d5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -17,17 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.util.RwLock;
-import org.apache.hadoop.ipc.StandbyException;
-import org.apache.hadoop.security.AccessControlException;
 
 /** Namesystem operations. */
 @InterfaceAudience.Private
@@ -35,26 +28,10 @@ public interface Namesystem extends RwLock, SafeMode {
   /** Is this name system running? */
   boolean isRunning();
 
-  /** Check if the user has superuser privilege. */
-  void checkSuperuserPrivilege() throws AccessControlException;
-
-  /** @return the block pool ID */
-  String getBlockPoolId();
-
   BlockCollection getBlockCollection(long id);
 
   void startSecretManagerIfNecessary();
 
-  /**
-   * Gets the erasure coding policy for the path
-   * @param src
-   *          - path
-   * @return {@link ErasureCodingPolicy}
-   * @throws IOException
-   */
-  ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
-      throws IOException;
-
   boolean isInSnapshot(long blockCollectionID);
 
   CacheManager getCacheManager();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 9a149f0d80a..b52e8d67c36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -220,7 +220,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     FileDiff last = diffs.getLast();
     BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
     if(snapshotBlocks == null)
-      file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
+      file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks(), null);
     else
       file.collectBlocksBeyondSnapshot(snapshotBlocks,
           reclaimContext.collectedBlocks());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index 367e3fa17c2..6c2832ec8a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -95,6 +95,7 @@ public class TestNameNodeMetadataConsistency {
     cluster.getNameNode().getNamesystem().writeLock();
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
         .getStoredBlock(block.getLocalBlock());
+    bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
     cluster.getNameNode().getNamesystem().writeUnlock();
@@ -146,6 +147,7 @@ public class TestNameNodeMetadataConsistency {
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
         ().getStoredBlock(block.getLocalBlock());
     cluster.getNameNode().getNamesystem().writeLock();
+    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
     cluster.getNameNode().getNamesystem().writeUnlock();
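The test updates spell out the new contract for anything that calls BlockManager#removeBlock directly: the caller must detach the block first, since neither removeBlock nor BlocksMap clears the owner id anymore. The call sequence, as now exercised by the test (a fragment, not standalone code; it assumes a running MiniDFSCluster named cluster and an ExtendedBlock named block, as in the test methods above):

    cluster.getNameNode().getNamesystem().writeLock();
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
        .getStoredBlock(block.getLocalBlock());
    bInfo.delete();   // required before removeBlock() after this patch
    cluster.getNameNode().getNamesystem().getBlockManager()
        .removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();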