diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee0fc20a4e3..24a0cea6df8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -867,6 +867,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8831. Trash Support for deletion in HDFS encryption zone. (xyao)
 
+    HDFS-9527. The return type of FSNamesystem.getBlockCollection should be
+    changed to INodeFile. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a6e769261c2..2cf2c9d8f23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2235,7 +2235,7 @@ private void processFirstBlockReport(
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // threshold. So we need to update such blocks to safemode
         // refer HDFS-5283
-        if (namesystem.isInSnapshot(storedBlock)) {
+        if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
           int numOfReplicas = storedBlock.getUnderConstructionFeature()
               .getNumExpectedLocations();
           bmSafeMode.incrementSafeBlockCount(numOfReplicas);
@@ -3632,7 +3632,7 @@ public BlockInfo addBlockCollection(BlockInfo block,
     return blocksMap.addBlockCollection(block, bc);
   }
 
-  public BlockCollection getBlockCollection(BlockInfo b) {
+  BlockCollection getBlockCollection(BlockInfo b) {
     return namesystem.getBlockCollection(b.getBlockCollectionId());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 28024b1d40e..7b728d55c1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -560,7 +560,7 @@ private void processBlocksForDecomInternal(
         continue;
       }
 
-      BlockCollection bc = namesystem.getBlockCollection(bcId);
+      final BlockCollection bc = blockManager.getBlockCollection(block);
       final NumberReplicas num = blockManager.countNodes(block);
       final int liveReplicas = num.liveReplicas();
       final int curReplicas = liveReplicas;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d00cec960d2..d892c6e89e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3118,11 +3118,10 @@ BlockInfo getStoredBlock(Block block) {
   }
 
   @Override
-  public boolean isInSnapshot(BlockInfo blockUC) {
+  public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();
-    final BlockCollection bc = blockManager.getBlockCollection(blockUC);
-    if (bc == null || !(bc instanceof INodeFile)
-        || !bc.isUnderConstruction()) {
+    final INodeFile bc = getBlockCollection(blockCollectionID);
+    if (bc == null || !bc.isUnderConstruction()) {
       return false;
     }
 
@@ -3149,8 +3148,12 @@ public boolean isInSnapshot(BlockInfo blockUC) {
     return true;
   }
 
+  INodeFile getBlockCollection(BlockInfo b) {
+    return getBlockCollection(b.getBlockCollectionId());
+  }
+
   @Override
-  public BlockCollection getBlockCollection(long id) {
+  public INodeFile getBlockCollection(long id) {
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -3211,8 +3214,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
             + " is null, likely because the file owning this block was"
            + " deleted and the block removal is delayed");
       }
-      long bcId = storedBlock.getBlockCollectionId();
-      INodeFile iFile = ((INode)getBlockCollection(bcId)).asFile();
+      final INodeFile iFile = getBlockCollection(storedBlock);
       src = iFile.getFullPathName();
       if (isFileDeleted(iFile)) {
         throw new FileNotFoundException("File not found: "
@@ -3628,8 +3630,7 @@ private void clearCorruptLazyPersistFiles()
       while (it.hasNext()) {
         Block b = it.next();
         BlockInfo blockInfo = blockManager.getStoredBlock(b);
-        BlockCollection bc = getBlockCollection(
-            blockInfo.getBlockCollectionId());
+        BlockCollection bc = getBlockCollection(blockInfo);
         if (bc.getStoragePolicyID() == lpPolicy.getId()) {
           filesToDelete.add(bc);
         }
@@ -4548,15 +4549,17 @@ private INodeFile checkUCBlock(ExtendedBlock block,
 
     // check stored block state
     BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
-    if (storedBlock == null ||
-        storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
-      throw new IOException(block +
-          " does not exist or is not under Construction" + storedBlock);
+    if (storedBlock == null) {
+      throw new IOException(block + " does not exist.");
+    }
+    if (storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
+      throw new IOException("Unexpected BlockUCState: " + block
+          + " is " + storedBlock.getBlockUCState()
+          + " but not " + BlockUCState.UNDER_CONSTRUCTION);
     }
 
     // check file inode
-    long bcId = storedBlock.getBlockCollectionId();
-    final INodeFile file = ((INode)getBlockCollection(bcId)).asFile();
+    final INodeFile file = getBlockCollection(storedBlock);
     if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
       throw new IOException("The file " + storedBlock +
           " belonged to does not exist or it is not under construction.");
@@ -4820,8 +4823,7 @@ Collection listCorruptFileBlocks(String path,
 
     while (blkIterator.hasNext()) {
       BlockInfo blk = blkIterator.next();
-      BlockCollection bc = getBlockCollection(blk.getBlockCollectionId());
-      final INode inode = (INode)bc;
+      final INodeFile inode = getBlockCollection(blk);
       skip++;
       if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
         String src = FSDirectory.getFullPathName(inode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 19eade4d3ea..be4d1c58e4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -47,7 +48,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -64,7 +64,6 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
@@ -254,8 +253,7 @@ public void blockIdCK(String blockId) {
       LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
       return;
     }
-    BlockCollection bc = bm.getBlockCollection(blockInfo);
-    INode iNode = (INode) bc;
+    final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
     NumberReplicas numberReplicas= bm.countNodes(blockInfo);
     out.println("Block Id: " + blockId);
     out.println("Block belongs to: "+iNode.getFullPathName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 84056d04345..fa90f7d22f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -49,7 +48,7 @@ public interface Namesystem extends RwLock, SafeMode {
 
   void startSecretManagerIfNecessary();
 
-  boolean isInSnapshot(BlockInfo blockUC);
+  boolean isInSnapshot(long blockCollectionID);
 
   CacheManager getCacheManager();
   HAContext getHAContext();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 77cfb7c0be2..22009fd54d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -62,7 +63,8 @@ public class TestClientProtocolForPipelineRecovery {
       namenode.updateBlockForPipeline(firstBlock, "");
       Assert.fail("Can not get a new GS from a finalized block");
     } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("is not under Construction"));
+      Assert.assertTrue(e.getMessage().contains(
+          "not " + BlockUCState.UNDER_CONSTRUCTION));
     }
 
     // test getNewStampAndToken on a non-existent block
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 51b0f216c1c..5c874baeef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -51,16 +51,18 @@
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
-import org.junit.Assert;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -438,13 +440,12 @@ private List startDecommission(int ... indexes) {
 
   private BlockInfo addBlockOnNodes(long blockId, List nodes) {
     long inodeId = ++mockINodeId;
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn(inodeId).when(bc).getId();
-    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
-    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
 
+    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
     blockInfo.setReplication((short) 3);
     blockInfo.setBlockCollectionId(inodeId);
+    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
@@ -746,12 +747,11 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
 
   private BlockInfo addBlockToBM(long blkId) {
     Block block = new Block(blkId);
-    BlockInfo blockInfo =
-        new BlockInfoContiguous(block, (short) 3);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
+    blockInfo.setBlockCollectionId(inodeId);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
   }
@@ -760,9 +760,9 @@ private BlockInfo addUcBlockToBM(long blkId) {
     Block block = new Block(blkId);
     BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
     blockInfo.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
-    BlockCollection bc = Mockito.mock(BlockCollection.class);
     long inodeId = ++mockINodeId;
-    doReturn(inodeId).when(bc).getId();
+    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
+    blockInfo.setBlockCollectionId(inodeId);
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     doReturn(bc).when(fsn).getBlockCollection(inodeId);
     return blockInfo;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index be5c4772442..6c2d00bc6b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -56,7 +56,10 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.Node;
 import org.apache.log4j.Level;
@@ -1309,7 +1312,7 @@ public void testUpdateDoesNotCauseSkippedReplication() {
   @Test(timeout = 60000)
   public void testAddStoredBlockDoesNotCauseSkippedReplication()
       throws IOException {
-    Namesystem mockNS = mock(Namesystem.class);
+    FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
@@ -1337,10 +1340,11 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
     // queue.
     BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
     info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
-    BlockCollection bc = mock(BlockCollection.class);
-    when(bc.getId()).thenReturn(1000L);
-    when(mockNS.getBlockCollection(1000L)).thenReturn(bc);
-    bm.addBlockCollection(info, bc);
+    info.setBlockCollectionId(1000L);
+
+    final INodeFile file = TestINodeFile.createINodeFile(1000L);
+    when(mockNS.getBlockCollection(1000L)).thenReturn(file);
+    bm.addBlockCollection(info, file);
 
     // Adding this block will increase its current replication, and that will
     // remove it from the queue.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4a4c7a329da..bc9bc4f7a88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -826,7 +826,7 @@ public void testFsckReplicaDetails() throws Exception {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
@@ -1394,7 +1394,7 @@ public void testBlockIdCKDecommission() throws Exception {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
@@ -1609,7 +1609,7 @@ public void testFsckWithDecommissionedReplicas() throws Exception {
     try {
       fsn.writeLock();
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-      bc = bm.getBlockCollection(bi);
+      bc = fsn.getBlockCollection(bi);
     } finally {
       fsn.writeUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 3dfe9d76f80..33a547ad40c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -88,6 +88,11 @@ public class TestINodeFile {
   private short replication;
   private long preferredBlockSize = 1024;
 
+  static public INodeFile createINodeFile(long id) {
+    return new INodeFile(id, ("file" + id).getBytes(), perm, 0L, 0L, null,
+        (short)3, 1024L);
+  }
+
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
         null, replication, preferredBlockSize, (byte)0);
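
HDFS-9527 in brief: FSNamesystem.getBlockCollection(long) now returns INodeFile
instead of BlockCollection, and a package-private BlockInfo overload is added,
so namenode-internal callers can drop the ((INode) ...).asFile() downcasts
removed above. A minimal sketch of the call-site pattern, assuming an
FSNamesystem instance fsn and a resolved BlockInfo storedBlock as in the hunks
above; the variable names are illustrative only, not part of the patch:

    // Before: the declared return type is the BlockCollection interface, so
    // reaching INodeFile-specific methods such as getFullPathName() requires
    // an explicit downcast through INode.
    long bcId = storedBlock.getBlockCollectionId();
    INodeFile iFile = ((INode) fsn.getBlockCollection(bcId)).asFile();
    String src = iFile.getFullPathName();

    // After: the narrowed return type (or the new BlockInfo overload) yields
    // an INodeFile directly; getBlockCollection still returns null when the
    // inode no longer exists, so callers null-check instead of casting.
    final INodeFile file = fsn.getBlockCollection(storedBlock);
    String path = (file == null) ? null : file.getFullPathName();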