From 132478e805ba0f955345217b8ad87c2d17cccb2d Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Wed, 9 Dec 2015 17:55:28 -0800
Subject: [PATCH] HDFS-9527. The return type of FSNamesystem.getBlockCollection
 should be changed to INodeFile.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../server/blockmanagement/BlockManager.java  |  4 +--
 .../blockmanagement/DecommissionManager.java  |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java    | 36 ++++++++++---------
 .../hdfs/server/namenode/NamenodeFsck.java    |  6 ++--
 .../hdfs/server/namenode/Namesystem.java      |  3 +-
 ...TestClientProtocolForPipelineRecovery.java |  4 ++-
 .../blockmanagement/TestBlockManager.java     | 22 ++++++------
 .../TestReplicationPolicy.java                | 14 +++++---
 .../hadoop/hdfs/server/namenode/TestFsck.java |  6 ++--
 .../hdfs/server/namenode/TestINodeFile.java   |  5 +++
 11 files changed, 59 insertions(+), 46 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8a892d5de61..6755f0041ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1736,6 +1736,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9214. Support reconfiguring dfs.datanode.balance.max.concurrent.moves
     without DN restart. (Xiaobing Zhou via Arpit Agarwal)
 
+    HDFS-9527. The return type of FSNamesystem.getBlockCollection should be
+    changed to INodeFile. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8c94c03cbb6..197850a8e3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2405,7 +2405,7 @@ private void processFirstBlockReport(
       // OpenFileBlocks only inside snapshots also will be added to safemode
       // threshold. So we need to update such blocks to safemode
       // refer HDFS-5283
-      if (namesystem.isInSnapshot(storedBlock)) {
+      if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
         int numOfReplicas = storedBlock.getUnderConstructionFeature()
             .getNumExpectedLocations();
         bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
@@ -3997,7 +3997,7 @@ public BlockInfo addBlockCollectionWithCheck(
     return addBlockCollection(block, bc);
   }
 
-  public BlockCollection getBlockCollection(BlockInfo b) {
+  BlockCollection getBlockCollection(BlockInfo b) {
     return namesystem.getBlockCollection(b.getBlockCollectionId());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 42810350720..b9b5bf48e08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -533,7 +533,7 @@ private void processBlocksForDecomInternal(
         continue;
       }
 
-      BlockCollection bc = namesystem.getBlockCollection(bcId);
+      final BlockCollection bc = blockManager.getBlockCollection(block);
       final NumberReplicas num = blockManager.countNodes(block);
       final int liveReplicas = num.liveReplicas();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9c9d9f5d554..7e52625dd5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3172,11 +3172,10 @@ BlockInfo getStoredBlock(Block block) {
   }
 
   @Override
-  public boolean isInSnapshot(BlockInfo blockUC) {
+  public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();
-    final BlockCollection bc = blockManager.getBlockCollection(blockUC);
-    if (bc == null || !(bc instanceof INodeFile)
-        || !bc.isUnderConstruction()) {
+    final INodeFile bc = getBlockCollection(blockCollectionID);
+    if (bc == null || !bc.isUnderConstruction()) {
       return false;
     }
 
@@ -3203,8 +3202,12 @@ public boolean isInSnapshot(BlockInfo blockUC) {
     return true;
   }
 
+  INodeFile getBlockCollection(BlockInfo b) {
+    return getBlockCollection(b.getBlockCollectionId());
+  }
+
   @Override
-  public BlockCollection getBlockCollection(long id) {
+  public INodeFile getBlockCollection(long id) {
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -3265,8 +3268,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
             + " is null, likely because the file owning this block was"
             + " deleted and the block removal is delayed");
       }
-      long bcId = storedBlock.getBlockCollectionId();
-      INodeFile iFile = ((INode)getBlockCollection(bcId)).asFile();
+      final INodeFile iFile = getBlockCollection(storedBlock);
       src = iFile.getFullPathName();
       if (isFileDeleted(iFile)) {
         throw new FileNotFoundException("File not found: "
@@ -3682,8 +3684,7 @@ private void clearCorruptLazyPersistFiles()
         while (it.hasNext()) {
           Block b = it.next();
           BlockInfo blockInfo = blockManager.getStoredBlock(b);
-          BlockCollection bc = getBlockCollection(
-              blockInfo.getBlockCollectionId());
+          BlockCollection bc = getBlockCollection(blockInfo);
           if (bc.getStoragePolicyID() == lpPolicy.getId()) {
             filesToDelete.add(bc);
           }
@@ -4606,15 +4607,17 @@ private INodeFile checkUCBlock(ExtendedBlock block,
 
     // check stored block state
     BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
-    if (storedBlock == null ||
-        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
-        throw new IOException(block +
-            " does not exist or is not under Construction" + storedBlock);
+    if (storedBlock == null) {
+      throw new IOException(block + " does not exist.");
+    }
+    if (storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
+      throw new IOException("Unexpected BlockUCState: " + block
+          + " is " + storedBlock.getBlockUCState()
+          + " but not " + BlockUCState.UNDER_CONSTRUCTION);
     }
 
     // check file inode
-    long bcId = storedBlock.getBlockCollectionId();
-    final INodeFile file = ((INode)getBlockCollection(bcId)).asFile();
+    final INodeFile file = getBlockCollection(storedBlock);
     if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
       throw new IOException("The file " + storedBlock +
           " belonged to does not exist or it is not under construction.");
@@ -4887,8 +4890,7 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
 
       while (blkIterator.hasNext()) {
         BlockInfo blk = blkIterator.next();
-        BlockCollection bc = getBlockCollection(blk.getBlockCollectionId());
-        final INode inode = (INode)bc;
+        final INodeFile inode = getBlockCollection(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
           String src = FSDirectory.getFullPathName(inode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 9d4edb5420c..fec2abd0560 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -47,7 +48,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -64,7 +64,6 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -261,8 +260,7 @@ public void blockIdCK(String blockId) {
       LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
       return;
     }
-    BlockCollection bc = blockManager.getBlockCollection(blockInfo);
-    INode iNode = (INode) bc;
+    final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
     NumberReplicas numberReplicas= blockManager.countNodes(blockInfo);
     out.println("Block Id: " + blockId);
     out.println("Block belongs to: "+iNode.getFullPathName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 59ad092a12c..f2cc75bfea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -62,7 +61,7 @@ public interface Namesystem extends RwLock, SafeMode {
   ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
       throws IOException;
 
-  boolean isInSnapshot(BlockInfo blockUC);
+  boolean isInSnapshot(long blockCollectionID);
 
   CacheManager getCacheManager();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 77cfb7c0be2..22009fd54d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -62,7 +63,8 @@ public class TestClientProtocolForPipelineRecovery {
         namenode.updateBlockForPipeline(firstBlock, "");
         Assert.fail("Can not get a new GS from a finalized block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains("is not under Construction"));
+        Assert.assertTrue(e.getMessage().contains(
+            "not " + BlockUCState.UNDER_CONSTRUCTION));
       }
 
       // test getNewStampAndToken on a non-existent block
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9b7ba4a9782..5df73cecf7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -51,16 +51,18 @@
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
-import org.junit.Assert;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -438,13 +440,12 @@ private List<DatanodeDescriptor> startDecommission(int ... indexes) {
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     long inodeId = ++mockINodeId;
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn(inodeId).when(bc).getId();
-    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
-    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
 
+    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
     blockInfo.setReplication((short) 3);
     blockInfo.setBlockCollectionId(inodeId);
+    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
@@ -747,12 +748,11 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
 
   private BlockInfo addBlockToBM(long blkId) {
     Block block = new Block(blkId);
-    BlockInfo blockInfo =
-        new BlockInfoContiguous(block, (short) 3);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
+    blockInfo.setBlockCollectionId(inodeId);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
   }
@@ -761,9 +761,9 @@ private BlockInfo addUcBlockToBM(long blkId) {
     Block block = new Block(blkId);
     BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     blockInfo.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
+    blockInfo.setBlockCollectionId(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1a8a088161e..3493c142dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -56,7 +56,10 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.Node;
 import org.apache.log4j.Level;
@@ -1309,7 +1312,7 @@ public void testUpdateDoesNotCauseSkippedReplication() {
   @Test(timeout = 60000)
   public void testAddStoredBlockDoesNotCauseSkippedReplication()
       throws IOException {
-    Namesystem mockNS = mock(Namesystem.class);
+    FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
@@ -1337,10 +1340,11 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
     // queue.
     BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
     info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
-    BlockCollection bc = mock(BlockCollection.class);
-    when(bc.getId()).thenReturn(1000L);
-    when(mockNS.getBlockCollection(1000L)).thenReturn(bc);
-    bm.addBlockCollection(info, bc);
+    info.setBlockCollectionId(1000L);
+
+    final INodeFile file = TestINodeFile.createINodeFile(1000L);
+    when(mockNS.getBlockCollection(1000L)).thenReturn(file);
+    bm.addBlockCollection(info, file);
 
     // Adding this block will increase its current replication, and that will
     // remove it from the queue.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 9b06f85c475..cc741905302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -913,7 +913,7 @@ public void testFsckReplicaDetails() throws Exception {
       try {
         fsn.writeLock();
         BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = bm.getBlockCollection(bi);
+        bc = fsn.getBlockCollection(bi);
       } finally {
         fsn.writeUnlock();
       }
@@ -1484,7 +1484,7 @@ public void testBlockIdCKDecommission() throws Exception {
       try {
         fsn.writeLock();
         BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = bm.getBlockCollection(bi);
+        bc = fsn.getBlockCollection(bi);
       } finally {
         fsn.writeUnlock();
       }
@@ -1699,7 +1699,7 @@ public void testFsckWithDecommissionedReplicas() throws Exception {
       try {
         fsn.writeLock();
         BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = bm.getBlockCollection(bi);
+        bc = fsn.getBlockCollection(bi);
       } finally {
         fsn.writeUnlock();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index c33e668b7ec..89b2854a758 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -89,6 +89,11 @@ public class TestINodeFile {
   private short replication;
   private long preferredBlockSize = 1024;
 
+  static public INodeFile createINodeFile(long id) {
+    return new INodeFile(id, ("file" + id).getBytes(), perm, 0L, 0L, null,
+        (short)3, 1024L);
+  }
+
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
         null, replication, preferredBlockSize);