From d737bf99d44ce34cd01baad716d23df269267c95 Mon Sep 17 00:00:00 2001
From: Lei Xu
Date: Wed, 4 Apr 2018 15:56:17 -0700
Subject: [PATCH] HDFS-13350. Negative legacy block ID will confuse Erasure
 Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

---
 .../blockmanagement/BlockIdManager.java       | 17 ++++
 .../server/blockmanagement/BlockManager.java  |  5 +-
 .../blockmanagement/BlockManagerSafeMode.java |  2 +-
 .../server/blockmanagement/BlocksMap.java     | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java   | 35 +++++------
 .../blockmanagement/InvalidateBlocks.java     | 13 ++--
 .../blockmanagement/TestBlockManager.java     | 61 ++++++++++++++---
 .../TestCorruptReplicaInfo.java               | 48 ++++++++++-----
 8 files changed, 136 insertions(+), 57 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b6284..5eebe8e2e51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ void clear() {
     legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative; such an ID must not be
+   * treated as a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) for detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+    return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this method must not be used on its
+   * own to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
     return BlockType.fromBlockId(id) == STRIPED;
   }
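
Why both checks are needed, condensed into a standalone sketch: striped (erasure-coded) block IDs are allocated from the negative half of the long range, so the sign bit effectively doubles as the striped flag, and a pre-HDFS-4645 block that happened to draw a negative random ID looks striped by ID alone. Only the generation stamp tells the two apart. The sketch below is illustrative, not HDFS source; the constant and the id < 0 test merely stand in for BlockType.fromBlockId and the manager's legacy-stamp state.

    // Illustrative sketch only; not HDFS source.
    public class StripedIdSketch {
      // Assumption: stands in for the NameNode's legacy generation stamp limit.
      static final long LEGACY_GENERATION_STAMP_LIMIT = 10000L;

      // ID-only test: mirrors isStripedBlockID, which cannot tell a striped
      // ID from a randomly generated negative legacy ID.
      static boolean isStripedBlockId(long id) {
        return id < 0;
      }

      // A generation stamp below the legacy limit marks a pre-HDFS-4645
      // block, whatever the sign of its ID (mirrors isLegacyBlock).
      static boolean isLegacyBlock(long generationStamp) {
        return generationStamp < LEGACY_GENERATION_STAMP_LIMIT;
      }

      // The combined check this patch introduces as isStripedBlock(Block).
      static boolean isStripedBlock(long id, long generationStamp) {
        return isStripedBlockId(id) && !isLegacyBlock(generationStamp);
      }

      public static void main(String[] args) {
        // Randomly numbered legacy block that happens to be negative: not striped.
        System.out.println(isStripedBlock(-42L, 1000L));                  // false
        // Genuine erasure-coded block group allocated after the limit: striped.
        System.out.println(isStripedBlock(Long.MIN_VALUE + 16, 20000L));  // true
      }
    }
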
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d87c69..76a77816d5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT)
         * 1000L;
     invalidateBlocks = new InvalidateBlocks(
         datanodeManager.getBlockInvalidateLimit(),
-        startupDelayBlockDeletionInMs);
+        startupDelayBlockDeletionInMs,
+        blockIdManager);
 
     // Compute the map capacity by allocating 2% of total memory
     blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
       corrupted.setBlockId(b.getStored().getBlockId());
     }
     corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-        b.getReasonCode());
+        b.getReasonCode(), b.getStored().isStriped());
 
     NumberReplicas numberOfReplicas = countNodes(b.getStored());
     boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index daa3d8bb917..8de17ef5c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -486,7 +486,7 @@ void checkBlocksWithFutureGS(BlockReportReplica brr) {
     if (!blockManager.getShouldPostponeBlocksFromFuture() &&
         !inRollBack &&
         blockManager.isGenStampInFuture(brr)) {
-      if (BlockIdManager.isStripedBlockID(brr.getBlockId())) {
+      if (blockManager.getBlockIdManager().isStripedBlock(brr)) {
         bytesInFutureECBlockGroups.add(brr.getBytesOnDisk());
       } else {
         bytesInFutureBlocks.add(brr.getBytesOnDisk());
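
Both files above make the same move: instead of re-deriving the block type from the raw ID with the static BlockIdManager.isStripedBlockID, the type is taken from the stored BlockInfo (which records it authoritatively at allocation) or from the BlockIdManager instance, which can consult the legacy generation stamp limit. A minimal sketch of the receiving side of that pattern; the class and method names here are hypothetical:

    // Sketch of the safe-mode accounting pattern; names are hypothetical.
    import java.util.concurrent.atomic.LongAdder;

    class FutureBytesTally {
      private final LongAdder bytesInFutureBlocks = new LongAdder();
      private final LongAdder bytesInFutureECBlockGroups = new LongAdder();

      // isStriped is the caller's verdict, e.g. the result of
      // blockManager.getBlockIdManager().isStripedBlock(replica), so a
      // negative legacy ID can no longer be booked as an EC block group.
      void addFutureBytes(long bytesOnDisk, boolean isStriped) {
        if (isStriped) {
          bytesInFutureECBlockGroups.add(bytesOnDisk);
        } else {
          bytesInFutureBlocks.add(bytesOnDisk);
        }
      }

      long getBytesInFutureBlocks() {
        return bytesInFutureBlocks.longValue();
      }

      long getBytesInFutureECBlockGroups() {
        return bytesInFutureECBlockGroups.longValue();
      }
    }
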
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 6f13da999cf..a96c815b006 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -93,7 +93,7 @@ BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.
    */
-  void removeBlock(Block block) {
+  void removeBlock(BlockInfo block) {
     BlockInfo blockInfo = blocks.remove(block);
     if (blockInfo == null) {
       return;
@@ -175,7 +175,7 @@ boolean removeNode(Block b, DatanodeDescriptor node) {
     if (info.hasNoStorage()    // no datanodes left
         && info.isDeleted()) { // does not belong to a file
       blocks.remove(b);        // remove block from the map
-      decrementBlockStat(b);
+      decrementBlockStat(info);
     }
     return removed;
   }
@@ -207,16 +207,16 @@ int getCapacity() {
     return capacity;
   }
 
-  private void incrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void incrementBlockStat(BlockInfo block) {
+    if (block.isStriped()) {
       totalECBlockGroups.increment();
     } else {
       totalReplicatedBlocks.increment();
     }
   }
 
-  private void decrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void decrementBlockStat(BlockInfo block) {
+    if (block.isStriped()) {
       totalECBlockGroups.decrement();
       assert totalECBlockGroups.longValue() >= 0 :
           "Total number of ec block groups should be non-negative";
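
With removeBlock and the stat helpers now typed against BlockInfo, the per-type tallies are keyed off the stored block's own type bit rather than the sign of the lookup key, so the counter incremented on add is always the one decremented on remove. A toy model of that invariant; BlockInfo here is a hypothetical stand-in, not the real class:

    // Toy model of the BlocksMap bookkeeping; not HDFS source.
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.LongAdder;

    class BlocksMapSketch {
      // Hypothetical stand-in for the real BlockInfo: the type is fixed at
      // allocation time, independent of the sign of the ID.
      static final class BlockInfo {
        final long id;
        final boolean striped;
        BlockInfo(long id, boolean striped) {
          this.id = id;
          this.striped = striped;
        }
      }

      private final Map<Long, BlockInfo> blocks = new HashMap<>();
      private final LongAdder totalReplicatedBlocks = new LongAdder();
      private final LongAdder totalECBlockGroups = new LongAdder();

      void addBlock(BlockInfo b) {
        if (blocks.put(b.id, b) == null) {
          (b.striped ? totalECBlockGroups : totalReplicatedBlocks).increment();
        }
      }

      void removeBlock(BlockInfo b) {
        if (blocks.remove(b.id) != null) {
          // Keyed on the stored type, the counter that was incremented is
          // the one decremented, keeping both tallies non-negative.
          (b.striped ? totalECBlockGroups : totalReplicatedBlocks).decrement();
        }
      }
    }
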
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 7a576bb9b84..fe1224cdb03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -69,12 +69,12 @@ public enum Reason {
    * @param reasonCode the enum representation of the reason
    */
   void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
-      String reason, Reason reasonCode) {
+      String reason, Reason reasonCode, boolean isStriped) {
     Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
     if (nodes == null) {
       nodes = new HashMap<DatanodeDescriptor, Reason>();
       corruptReplicasMap.put(blk, nodes);
-      incrementBlockStat(blk);
+      incrementBlockStat(isStriped);
     }
 
     String reasonText;
@@ -103,11 +103,11 @@ void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
    * Remove Block from CorruptBlocksMap.
    * @param blk Block to be removed
    */
-  void removeFromCorruptReplicasMap(Block blk) {
+  void removeFromCorruptReplicasMap(BlockInfo blk) {
     if (corruptReplicasMap != null) {
       Map<DatanodeDescriptor, Reason> value = corruptReplicasMap.remove(blk);
       if (value != null) {
-        decrementBlockStat(blk);
+        decrementBlockStat(blk.isStriped());
       }
     }
   }
@@ -119,12 +119,13 @@ void removeFromCorruptReplicasMap(Block blk) {
    * @return true if the removal is successful;
             false if the replica is not in the map
    */
-  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
+  boolean removeFromCorruptReplicasMap(
+      BlockInfo blk, DatanodeDescriptor datanode) {
     return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY);
   }
 
-  boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
-      Reason reason) {
+  boolean removeFromCorruptReplicasMap(
+      BlockInfo blk, DatanodeDescriptor datanode, Reason reason) {
     Map<DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
     if (datanodes == null) {
       return false;
@@ -141,23 +142,23 @@ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
       if (datanodes.isEmpty()) {
         // remove the block if there is no more corrupted replicas
         corruptReplicasMap.remove(blk);
-        decrementBlockStat(blk);
+        decrementBlockStat(blk.isStriped());
       }
       return true;
     }
     return false;
   }
 
-  private void incrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void incrementBlockStat(boolean isStriped) {
+    if (isStriped) {
       totalCorruptECBlockGroups.increment();
     } else {
       totalCorruptBlocks.increment();
     }
   }
 
-  private void decrementBlockStat(Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  private void decrementBlockStat(boolean isStriped) {
+    if (isStriped) {
       totalCorruptECBlockGroups.decrement();
     } else {
       totalCorruptBlocks.decrement();
@@ -205,6 +206,8 @@ int size() {
    * is null, up to numExpectedBlocks blocks are returned from the beginning.
    * If startingBlockId cannot be found, null is returned.
    *
+   * @param bim BlockIdManager to determine the block type.
+   * @param blockType desired block type to return.
    * @param numExpectedBlocks Number of block ids to return.
    *                          0 <= numExpectedBlocks <= 100
    * @param startingBlockId Block id from which to start. If null, start at
@@ -212,7 +215,7 @@ int size() {
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    */
  @VisibleForTesting
-  long[] getCorruptBlockIdsForTesting(BlockType blockType,
+  long[] getCorruptBlockIdsForTesting(BlockIdManager bim, BlockType blockType,
       int numExpectedBlocks, Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
@@ -223,11 +226,9 @@ long[] getCorruptBlockIdsForTesting(BlockType blockType,
         .stream()
         .filter(r -> {
           if (blockType == BlockType.STRIPED) {
-            return BlockIdManager.isStripedBlockID(r.getBlockId()) &&
-                r.getBlockId() >= cursorBlockId;
+            return bim.isStripedBlock(r) && r.getBlockId() >= cursorBlockId;
           } else {
-            return !BlockIdManager.isStripedBlockID(r.getBlockId()) &&
-                r.getBlockId() >= cursorBlockId;
+            return !bim.isStripedBlock(r) && r.getBlockId() >= cursorBlockId;
           }
         })
        .sorted()
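
The test helper above pages through corrupt block IDs with a stream pipeline: keep IDs of the requested type (classified through the BlockIdManager instance, so negative legacy IDs now land on the contiguous side), drop IDs before the cursor, sort, and cap the count. The same shape in isolation over plain longs; the names and the injected predicate are hypothetical:

    // Standalone sketch of the filter/sort/limit pagination; not HDFS source.
    import java.util.List;
    import java.util.function.LongPredicate;

    class CorruptIdPager {
      // isStriped plays the role of bim::isStripedBlock on the real code path.
      static long[] page(List<Long> corruptIds, LongPredicate isStriped,
          boolean wantStriped, int limit, Long startingId) {
        final long cursor = (startingId == null) ? Long.MIN_VALUE : startingId;
        return corruptIds.stream()
            .mapToLong(Long::longValue)
            .filter(id -> isStriped.test(id) == wantStriped && id >= cursor)
            .sorted()
            .limit(limit)
            .toArray();
      }
    }
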
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index ff68c020f31..7c837920c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -57,6 +57,7 @@ class InvalidateBlocks {
   private final LongAdder numBlocks = new LongAdder();
   private final LongAdder numECBlocks = new LongAdder();
   private final int blockInvalidateLimit;
+  private final BlockIdManager blockIdManager;
 
   /**
    * The period of pending time for block invalidation since the NameNode
@@ -66,9 +67,11 @@ class InvalidateBlocks {
   /** the startup time */
   private final long startupTime = Time.monotonicNow();
 
-  InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs) {
+  InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs,
+      final BlockIdManager blockIdManager) {
     this.blockInvalidateLimit = blockInvalidateLimit;
     this.pendingPeriodInMs = pendingPeriodInMs;
+    this.blockIdManager = blockIdManager;
     printBlockDeletionTime(BlockManager.LOG);
   }
 
@@ -124,7 +127,7 @@ private LightWeightHashSet<Block> getECBlocksSet(final DatanodeInfo dn) {
 
   private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn,
       final Block block) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+    if (blockIdManager.isStripedBlock(block)) {
       return getECBlocksSet(dn);
     } else {
       return getBlocksSet(dn);
@@ -133,7 +136,7 @@ private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn,
 
   private void putBlocksSet(final DatanodeInfo dn, final Block block,
       final LightWeightHashSet<Block> set) {
-    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+    if (blockIdManager.isStripedBlock(block)) {
       assert getECBlocksSet(dn) == null;
       nodeToECBlocks.put(dn, set);
     } else {
@@ -178,7 +181,7 @@ synchronized void add(final Block block, final DatanodeInfo datanode,
       putBlocksSet(datanode, block, set);
     }
     if (set.add(block)) {
-      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      if (blockIdManager.isStripedBlock(block)) {
        numECBlocks.increment();
      } else {
        numBlocks.increment();
@@ -206,7 +209,7 @@ synchronized void remove(final DatanodeInfo dn) {
   synchronized void remove(final DatanodeInfo dn, final Block block) {
     final LightWeightHashSet<Block> v = getBlocksSet(dn, block);
     if (v != null && v.remove(block)) {
-      if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      if (blockIdManager.isStripedBlock(block)) {
         numECBlocks.decrement();
       } else {
         numBlocks.decrement();
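
InvalidateBlocks keeps two per-datanode sets, one for replicated blocks and one for EC block groups, plus two LongAdder totals, and every add and remove must route through one and the same type test or the sets and counters drift apart. A condensed model of that routing, with an injected predicate standing where the injected BlockIdManager sits in the real constructor (all names hypothetical):

    // Condensed model of the per-datanode routing; not HDFS source.
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.atomic.LongAdder;
    import java.util.function.LongPredicate;

    class InvalidateBlocksSketch {
      private final Map<String, Set<Long>> nodeToBlocks = new HashMap<>();
      private final Map<String, Set<Long>> nodeToECBlocks = new HashMap<>();
      private final LongAdder numBlocks = new LongAdder();
      private final LongAdder numECBlocks = new LongAdder();
      // Injected, like the BlockIdManager handed to the real constructor.
      private final LongPredicate isStripedBlock;

      InvalidateBlocksSketch(LongPredicate isStripedBlock) {
        this.isStripedBlock = isStripedBlock;
      }

      // One routing decision, shared by add and remove.
      private Map<String, Set<Long>> mapFor(long blockId) {
        return isStripedBlock.test(blockId) ? nodeToECBlocks : nodeToBlocks;
      }

      synchronized void add(String datanode, long blockId) {
        Set<Long> set =
            mapFor(blockId).computeIfAbsent(datanode, d -> new HashSet<>());
        if (set.add(blockId)) {
          (isStripedBlock.test(blockId) ? numECBlocks : numBlocks).increment();
        }
      }

      synchronized void remove(String datanode, long blockId) {
        Set<Long> set = mapFor(blockId).get(datanode);
        if (set != null && set.remove(blockId)) {
          (isStripedBlock.test(blockId) ? numECBlocks : numBlocks).decrement();
        }
      }
    }
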
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f63be91f27b..5219a44994f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -39,6 +39,8 @@
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -114,10 +116,13 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestBlockManager {
   private DatanodeStorageInfo[] storages;
@@ -1343,14 +1348,14 @@ public void testIsReplicaCorruptCall() throws Exception {
     spyBM.createLocatedBlocks(new BlockInfo[]{blockInfo}, 3L, false, 0L, 3L,
         false, false, null, null);
     verify(spyBM, Mockito.atLeast(0)).
-        isReplicaCorrupt(Mockito.any(BlockInfo.class),
-            Mockito.any(DatanodeDescriptor.class));
+        isReplicaCorrupt(any(BlockInfo.class),
+            any(DatanodeDescriptor.class));
     addCorruptBlockOnNodes(0, origNodes);
     spyBM.createLocatedBlocks(new BlockInfo[]{blockInfo}, 3L, false, 0L, 3L,
         false, false, null, null);
     verify(spyBM, Mockito.atLeast(1)).
-        isReplicaCorrupt(Mockito.any(BlockInfo.class),
-            Mockito.any(DatanodeDescriptor.class));
+        isReplicaCorrupt(any(BlockInfo.class),
+            any(DatanodeDescriptor.class));
   }
 
   @Test (timeout = 300000)
@@ -1506,8 +1511,8 @@ private BlockInfo makeBlockReplicasMissing(long blockId,
         blockInfo.getGenerationStamp() + 1,
         blockInfo.getNumBytes(),
         new DatanodeStorageInfo[]{});
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1524,8 +1529,8 @@ private BlockInfo makeBlockReplicasMaintenance(long blockId,
     Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     nodesList.get(0).setInMaintenance();
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1580,8 +1585,8 @@ private BlockInfo makeBlockReplicasDecommission(long blockId,
     Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     nodesList.get(0).startDecommission();
-    BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
-    Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+    BlockCollection mockedBc = mock(BlockCollection.class);
+    when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
     bm.checkRedundancy(mockedBc);
     return blockInfo;
   }
@@ -1623,4 +1628,40 @@ public void testMetaSaveDecommissioningReplicas() throws Exception {
     }
   }
 
+  @Test
+  public void testLegacyBlockInInvalidateBlocks() {
+    final long legacyGenerationStampLimit = 10000;
+    BlockIdManager bim = Mockito.mock(BlockIdManager.class);
+
+    when(bim.getLegacyGenerationStampLimit())
+        .thenReturn(legacyGenerationStampLimit);
+    when(bim.isStripedBlock(any(Block.class))).thenCallRealMethod();
+    when(bim.isLegacyBlock(any(Block.class))).thenCallRealMethod();
+
+    InvalidateBlocks ibs = new InvalidateBlocks(100, 30000, bim);
+
+    Block legacy = new Block(-1, 10, legacyGenerationStampLimit / 10);
+    Block striped = new Block(
+        bm.nextBlockId(BlockType.STRIPED), 10,
+        legacyGenerationStampLimit + 10);
+
+    DatanodeInfo legacyDnInfo = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo stripedDnInfo = DFSTestUtil.getLocalDatanodeInfo();
+
+    ibs.add(legacy, legacyDnInfo, false);
+    assertEquals(1, ibs.getBlocks());
+    assertEquals(0, ibs.getECBlocks());
+
+    ibs.add(striped, stripedDnInfo, false);
+    assertEquals(1, ibs.getBlocks());
+    assertEquals(1, ibs.getECBlocks());
+
+    ibs.remove(legacyDnInfo);
+    assertEquals(0, ibs.getBlocks());
+    assertEquals(1, ibs.getECBlocks());
+
+    ibs.remove(stripedDnInfo);
+    assertEquals(0, ibs.getBlocks());
+    assertEquals(0, ibs.getECBlocks());
+  }
 }
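
The new test above relies on a Mockito partial mock: BlockIdManager is mocked, getLegacyGenerationStampLimit is stubbed, and thenCallRealMethod() lets the real isStripedBlock and isLegacyBlock logic execute against that stub. If the mechanism is unfamiliar, here it is on a toy class (entirely made up; assumes Mockito on the classpath):

    // Toy demonstration of Mockito's thenCallRealMethod(); not HDFS code.
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    class PartialMockDemo {
      static class Thermometer {
        long offsetMillidegrees() {
          return 2000L;                          // real value, stubbed below
        }
        long readingMillidegrees() {
          return 20000L + offsetMillidegrees();  // real logic under test
        }
      }

      public static void main(String[] args) {
        Thermometer t = mock(Thermometer.class);
        when(t.offsetMillidegrees()).thenReturn(5000L);      // stub dependency
        when(t.readingMillidegrees()).thenCallRealMethod();  // run real logic
        // The real method sees the stubbed offset: 20000 + 5000.
        System.out.println(t.readingMillidegrees());         // prints 25000
      }
    }
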
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 3510bc3d769..299df56b2b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -21,6 +21,8 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -30,10 +32,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
@@ -46,27 +50,31 @@ public class TestCorruptReplicaInfo {
   private static final Log LOG = LogFactory.getLog(
       TestCorruptReplicaInfo.class);
-  private final Map<Long, Block> replicaMap = new HashMap<>();
-  private final Map<Long, Block> stripedBlocksMap = new HashMap<>();
+  private final Map<Long, BlockInfo> replicaMap = new HashMap<>();
+  private final Map<Long, BlockInfo> stripedBlocksMap = new HashMap<>();
 
   // Allow easy block creation by block id. Return existing
   // replica block if one with same block id already exists.
-  private Block getReplica(Long blockId) {
+  private BlockInfo getReplica(Long blockId) {
     if (!replicaMap.containsKey(blockId)) {
-      replicaMap.put(blockId, new Block(blockId, 0, 0));
+      short replFactor = 3;
+      replicaMap.put(blockId,
+          new BlockInfoContiguous(new Block(blockId, 0, 0), replFactor));
     }
     return replicaMap.get(blockId);
   }
 
-  private Block getReplica(int blkId) {
+  private BlockInfo getReplica(int blkId) {
     return getReplica(Long.valueOf(blkId));
   }
 
-  private Block getStripedBlock(int blkId) {
+  private BlockInfo getStripedBlock(int blkId) {
     Long stripedBlockId = (1L << 63) + blkId;
     assertTrue(BlockIdManager.isStripedBlockID(stripedBlockId));
     if (!stripedBlocksMap.containsKey(stripedBlockId)) {
-      stripedBlocksMap.put(stripedBlockId, new Block(stripedBlockId, 1024, 0));
+      stripedBlocksMap.put(stripedBlockId,
+          new BlockInfoStriped(new Block(stripedBlockId, 1024, 0),
+              StripedFileTestUtil.getDefaultECPolicy()));
     }
     return stripedBlocksMap.get(stripedBlockId);
   }
@@ -88,6 +96,10 @@ private void verifyCorruptBlocksCount(CorruptReplicasMap corruptReplicasMap,
   public void testCorruptReplicaInfo()
       throws IOException, InterruptedException {
     CorruptReplicasMap crm = new CorruptReplicasMap();
+    BlockIdManager bim = Mockito.mock(BlockIdManager.class);
+    when(bim.isLegacyBlock(any(Block.class))).thenReturn(false);
+    when(bim.isStripedBlock(any(Block.class))).thenCallRealMethod();
+    assertTrue(!bim.isLegacyBlock(new Block(-1)));
 
     // Make sure initial values are returned correctly
     assertEquals("Total number of corrupt blocks must initially be 0!",
        0, crm.getCorruptBlocks());
@@ -97,10 +109,11 @@ public void testCorruptReplicaInfo()
     assertEquals("Number of corrupt striped block groups must initially be 0!",
         0, crm.getCorruptECBlockGroups());
     assertNull("Param n cannot be less than 0",
-        crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, -1, null));
+        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, -1, null));
     assertNull("Param n cannot be greater than 100",
-        crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 101, null));
+        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, 101, null));
-    long[] l = crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 0, null);
+    long[] l = crm.getCorruptBlockIdsForTesting(
+        bim, BlockType.CONTIGUOUS, 0, null);
     assertNotNull("n = 0 must return non-null", l);
     assertEquals("n = 0 must return an empty list", 0, l.length);
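
An aside on the getStripedBlock helper above: 1L << 63 overflows to Long.MIN_VALUE, so adding a small non-negative blkId yields an ID with the sign bit set, which is exactly the range reserved for striped block groups (and why the assertTrue on BlockIdManager.isStripedBlockID passes). A few self-checking lines, standalone; run with java -ea to enable the asserts:

    // Why (1L << 63) + blkId produces a striped-range ID; standalone sketch.
    public class StripedIdBitDemo {
      public static void main(String[] args) {
        long base = 1L << 63;           // overflows to Long.MIN_VALUE
        assert base == Long.MIN_VALUE;
        long stripedId = base + 7;      // sign bit set, low bits hold 7
        assert stripedId < 0;           // classified as striped by ID alone
        System.out.println(Long.toBinaryString(stripedId));
        // Prints 1000...0111: the top bit marks the striped range, the
        // remaining bits identify the block group.
      }
    }
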
@@ -156,22 +169,25 @@ public void testCorruptReplicaInfo()
         2 * blockCount, crm.size());
     assertTrue("First five corrupt replica blocks ids are not right!",
         Arrays.equals(Arrays.copyOfRange(replicaIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 5, null)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.CONTIGUOUS, 5, null)));
     assertTrue("First five corrupt striped blocks ids are not right!",
         Arrays.equals(Arrays.copyOfRange(stripedIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(BlockType.STRIPED, 5, null)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.STRIPED, 5, null)));
     assertTrue("10 replica blocks after 7 not returned correctly!",
         Arrays.equals(Arrays.copyOfRange(replicaIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, 10, 7L)));
+            crm.getCorruptBlockIdsForTesting(
+                bim, BlockType.CONTIGUOUS, 10, 7L)));
     assertTrue("10 striped blocks after 7 not returned correctly!",
         Arrays.equals(Arrays.copyOfRange(stripedIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(BlockType.STRIPED,
+            crm.getCorruptBlockIdsForTesting(bim, BlockType.STRIPED,
                 10, getStripedBlock(7).getBlockId())));
   }
 
   private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
-      Block blk, DatanodeDescriptor dn) {
-    crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
+      BlockInfo blk, DatanodeDescriptor dn) {
+    crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE, blk.isStriped());
   }
 }