diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f41537ed41e..da163579002 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -939,6 +939,8 @@ Release 2.9.0 - UNRELEASED
     HDFS-9576: HTrace: collect position/length information on read operations
     (zhz via cmccabe)
 
+    HDFS-9542. Move BlockIdManager from FSNamesystem to BlockManager. (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 17fef055615..9c712875ef9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -163,7 +163,7 @@ public class BlockIdManager {
   /**
    * Increments, logs and then returns the stamp
    */
-  public long nextGenerationStamp(boolean legacyBlock) throws IOException {
+  long nextGenerationStamp(boolean legacyBlock) throws IOException {
     return legacyBlock ? getNextGenerationStampV1() :
         getNextGenerationStampV2();
   }
@@ -199,22 +199,19 @@ public class BlockIdManager {
    *
    * @return true if the block ID was randomly generated, false otherwise.
    */
-  public boolean isLegacyBlock(Block block) {
+  boolean isLegacyBlock(Block block) {
     return block.getGenerationStamp() < getGenerationStampV1Limit();
   }
 
   /**
    * Increments, logs and then returns the block ID
    */
-  public long nextContiguousBlockId() {
-    return blockIdGenerator.nextValue();
+  long nextBlockId(boolean isStriped) {
+    return isStriped ? blockGroupIdGenerator.nextValue() :
+        blockIdGenerator.nextValue();
   }
 
-  public long nextStripedBlockId() {
-    return blockGroupIdGenerator.nextValue();
-  }
-
-  public boolean isGenStampInFuture(Block block) {
+  boolean isGenStampInFuture(Block block) {
     if (isLegacyBlock(block)) {
       return block.getGenerationStamp() > getGenerationStampV1();
     } else {
@@ -222,7 +219,7 @@ public class BlockIdManager {
     }
   }
 
-  public void clear() {
+  void clear() {
     generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
     generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
     getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator
@@ -240,7 +237,7 @@ public class BlockIdManager {
    * and the other 60 bits are 1. Group ID is the first 60 bits of any
    * data/parity block id in the same striped block group.
    */
-  public static long convertToStripedID(long id) {
+  static long convertToStripedID(long id) {
     return id & (~HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cee03f66f48..bf637082330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -308,11 +307,14 @@ public class BlockManager implements BlockStatsMXBean {
   /** Check whether there are any non-EC blocks using StripedID */
   private boolean hasNonEcBlockUsingStripedID = false;
 
-  public BlockManager(final Namesystem namesystem, final Configuration conf)
-    throws IOException {
+  private final BlockIdManager blockIdManager;
+
+  public BlockManager(final Namesystem namesystem, boolean haEnabled,
+      final Configuration conf) throws IOException {
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
+    this.blockIdManager = new BlockIdManager(this);
 
     startupDelayBlockDeletionInMs = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -387,7 +389,7 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
 
     this.blockReportLeaseManager = new BlockReportLeaseManager(conf);
-    bmSafeMode = new BlockManagerSafeMode(this, namesystem, conf);
+    bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf);
 
     LOG.info("defaultReplication = " + defaultReplication);
     LOG.info("maxReplication = " + maxReplication);
@@ -498,8 +500,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Should the access keys be updated? */
   boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
-    return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime)
-        : false;
+    return isBlockTokenEnabled() && blockTokenSecretManager.updateKeys(updateTime);
   }
 
   public void activate(Configuration conf, long blockTotal) {
@@ -538,7 +539,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(); // TODO: block manager read lock and NS write lock
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
     datanodeManager.fetchDatanodes(live, dead, false);
@@ -1108,27 +1109,8 @@ public class BlockManager implements BlockStatsMXBean {
     return countNodes(b).liveReplicas() >= replication;
   }
 
-  /**
-   * return a list of blocks & their locations on datanode whose
-   * total size is size
-   *
-   * @param datanode on which blocks are located
-   * @param size total size of blocks
-   */
-  public BlocksWithLocations getBlocks(DatanodeID datanode, long size
-      ) throws IOException {
-    namesystem.checkOperation(OperationCategory.READ);
-    namesystem.readLock();
-    try {
-      namesystem.checkOperation(OperationCategory.READ);
-      return getBlocksWithLocations(datanode, size);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
   /** Get all blocks with location information from a datanode. */
-  private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
+  public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
       final long size) throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
@@ -2353,8 +2335,7 @@ public class BlockManager implements BlockStatsMXBean {
             + " on " + storageInfo.getDatanodeDescriptor() + " size "
             + iblk.getNumBytes() + " replicaState = " + reportedState);
       }
-      if (shouldPostponeBlocksFromFuture &&
-          namesystem.isGenStampInFuture(iblk)) {
+      if (shouldPostponeBlocksFromFuture && isGenStampInFuture(iblk)) {
         queueReportedBlock(storageInfo, iblk, reportedState,
             QUEUE_REASON_FUTURE_GENSTAMP);
         continue;
@@ -2499,8 +2480,7 @@ public class BlockManager implements BlockStatsMXBean {
           + " replicaState = " + reportedState);
     }
 
-    if (shouldPostponeBlocksFromFuture &&
-        namesystem.isGenStampInFuture(block)) {
+    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(block)) {
       queueReportedBlock(storageInfo, block, reportedState,
           QUEUE_REASON_FUTURE_GENSTAMP);
       return null;
@@ -3360,8 +3340,7 @@ public class BlockManager implements BlockStatsMXBean {
   private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
       DatanodeDescriptor node) {
-    if (shouldPostponeBlocksFromFuture &&
-        namesystem.isGenStampInFuture(block)) {
+    if (shouldPostponeBlocksFromFuture && isGenStampInFuture(block)) {
       queueReportedBlock(storageInfo, block, null,
           QUEUE_REASON_FUTURE_GENSTAMP);
       return;
@@ -4201,6 +4180,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   public void clear() {
+    blockIdManager.clear();
     clearQueues();
     blocksMap.clear();
   }
@@ -4364,4 +4344,24 @@ public class BlockManager implements BlockStatsMXBean {
       }
     }
   }
+
+  public BlockIdManager getBlockIdManager() {
+    return blockIdManager;
+  }
+
+  public long nextGenerationStamp(boolean legacyBlock) throws IOException {
+    return blockIdManager.nextGenerationStamp(legacyBlock);
+  }
+
+  public boolean isLegacyBlock(Block block) {
+    return blockIdManager.isLegacyBlock(block);
+  }
+
+  public long nextBlockId(boolean isStriped) {
+    return blockIdManager.nextBlockId(isStriped);
+  }
+
+  boolean isGenStampInFuture(Block block) {
+    return blockIdManager.isGenStampInFuture(block);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index aba3c85aeae..e2d688ec81b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -115,10 +115,10 @@ class BlockManagerSafeMode {
   private final boolean inRollBack;
 
   BlockManagerSafeMode(BlockManager blockManager, Namesystem namesystem,
-      Configuration conf) {
+      boolean haEnabled, Configuration conf) {
     this.blockManager = blockManager;
     this.namesystem = namesystem;
-    this.haEnabled = namesystem.isHaEnabled();
+    this.haEnabled = haEnabled;
     this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
         DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT);
     if (this.threshold > 1.0) {
@@ -473,7 +473,7 @@ class BlockManagerSafeMode {
 
     if (!blockManager.getShouldPostponeBlocksFromFuture() &&
         !inRollBack &&
-        namesystem.isGenStampInFuture(brr)) {
+        blockManager.isGenStampInFuture(brr)) {
       numberOfBytesInFutureBlocks.addAndGet(brr.getBytesOnDisk());
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 03eb96d5a29..95980e76129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -228,7 +228,7 @@ final class FSDirTruncateOp {
       if (newBlock == null) {
         newBlock = (shouldCopyOnTruncate) ? fsn.createNewBlock(false) :
             new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
-                fsn.nextGenerationStamp(fsn.getBlockIdManager().isLegacyBlock(
+                fsn.nextGenerationStamp(fsn.getBlockManager().isLegacyBlock(
                     oldBlock)));
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a74dd7fcbc3..5d27786ad3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -116,12 +117,14 @@ public class FSEditLogLoader {
   static final long REPLAY_TRANSACTION_LOG_INTERVAL = 1000; // 1sec
 
   private final FSNamesystem fsNamesys;
+  private final BlockManager blockManager;
   private long lastAppliedTxId;
 
   /** Total number of end transactions loaded. */
   private int totalEdits = 0;
 
   public FSEditLogLoader(FSNamesystem fsNamesys, long lastAppliedTxId) {
     this.fsNamesys = fsNamesys;
+    this.blockManager = fsNamesys.getBlockManager();
     this.lastAppliedTxId = lastAppliedTxId;
   }
 
@@ -586,7 +589,7 @@ public class FSEditLogLoader {
     }
     case OP_SET_GENSTAMP_V1: {
       SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op)op;
-      fsNamesys.getBlockIdManager().setGenerationStampV1(
+      blockManager.getBlockIdManager().setGenerationStampV1(
           setGenstampV1Op.genStampV1);
       break;
     }
@@ -794,7 +797,7 @@ public class FSEditLogLoader {
     }
     case OP_SET_GENSTAMP_V2: {
       SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op;
-      fsNamesys.getBlockIdManager().setGenerationStampV2(
+      blockManager.getBlockIdManager().setGenerationStampV2(
          setGenstampV2Op.genStampV2);
       break;
     }
@@ -803,10 +806,10 @@ public class FSEditLogLoader {
       if (BlockIdManager.isStripedBlockID(allocateBlockIdOp.blockId)) {
         // ALLOCATE_BLOCK_ID is added for sequential block id, thus if the id
         // is negative, it must belong to striped blocks
-        fsNamesys.getBlockIdManager().setLastAllocatedStripedBlockId(
+        blockManager.getBlockIdManager().setLastAllocatedStripedBlockId(
            allocateBlockIdOp.blockId);
       } else {
-        fsNamesys.getBlockIdManager().setLastAllocatedContiguousBlockId(
+        blockManager.getBlockIdManager().setLastAllocatedContiguousBlockId(
            allocateBlockIdOp.blockId);
       }
       break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 8c05a2d9a1e..a815864099a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -48,11 +48,11 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -344,27 +344,26 @@ public class FSImageFormat {
 
       // read in the last generation stamp for legacy blocks.
       long genstamp = in.readLong();
-      namesystem.getBlockIdManager().setGenerationStampV1(genstamp);
+      final BlockIdManager blockIdManager = namesystem.getBlockManager()
+          .getBlockIdManager();
+      blockIdManager.setGenerationStampV1(genstamp);
 
       if (NameNodeLayoutVersion.supports(
           LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
         // read the starting generation stamp for sequential block IDs
         genstamp = in.readLong();
-        namesystem.getBlockIdManager().setGenerationStampV2(genstamp);
+        blockIdManager.setGenerationStampV2(genstamp);
 
         // read the last generation stamp for blocks created after
        // the switch to sequential block IDs.
        long stampAtIdSwitch = in.readLong();
-        namesystem.getBlockIdManager().setGenerationStampV1Limit(stampAtIdSwitch);
+        blockIdManager.setGenerationStampV1Limit(stampAtIdSwitch);
 
         // read the max sequential block ID.
         long maxSequentialBlockId = in.readLong();
-        namesystem.getBlockIdManager().setLastAllocatedContiguousBlockId(
-            maxSequentialBlockId);
+        blockIdManager.setLastAllocatedContiguousBlockId(maxSequentialBlockId);
       } else {
-
-        long startingGenStamp = namesystem.getBlockIdManager()
-          .upgradeGenerationStampToV2();
+        long startingGenStamp = blockIdManager.upgradeGenerationStampToV2();
         // This is an upgrade.
         LOG.info("Upgrading to sequential block IDs. Generation stamp " +
                  "for new blocks set to " + startingGenStamp);
@@ -1269,10 +1268,12 @@ public class FSImageFormat {
       out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
          .getNamespaceID());
       out.writeLong(numINodes);
-      out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
-      out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
-      out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
-      out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedContiguousBlockId());
+      final BlockIdManager blockIdManager = sourceNamesystem.getBlockManager()
+          .getBlockIdManager();
+      out.writeLong(blockIdManager.getGenerationStampV1());
+      out.writeLong(blockIdManager.getGenerationStampV2());
+      out.writeLong(blockIdManager.getGenerationStampAtblockIdSwitch());
+      out.writeLong(blockIdManager.getLastAllocatedContiguousBlockId());
       out.writeLong(context.getTxId());
       out.writeLong(sourceNamesystem.dir.getLastInodeId());
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index ef0cc1dd7ba..635dc34b68b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -293,7 +293,7 @@ public final class FSImageFormatProtobuf {
 
     private void loadNameSystemSection(InputStream in) throws IOException {
       NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
-      BlockIdManager blockIdManager = fsn.getBlockIdManager();
+      BlockIdManager blockIdManager = fsn.getBlockManager().getBlockIdManager();
       blockIdManager.setGenerationStampV1(s.getGenstampV1());
       blockIdManager.setGenerationStampV2(s.getGenstampV2());
       blockIdManager.setGenerationStampV1Limit(s.getGenstampV1Limit());
@@ -548,7 +548,7 @@ public final class FSImageFormatProtobuf {
         throws IOException {
       final FSNamesystem fsn = context.getSourceNamesystem();
       OutputStream out = sectionOutputStream;
-      BlockIdManager blockIdManager = fsn.getBlockIdManager();
+      BlockIdManager blockIdManager = fsn.getBlockManager().getBlockIdManager();
       NameSystemSection.Builder b = NameSystemSection.newBuilder()
          .setGenstampV1(blockIdManager.getGenerationStampV1())
          .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fa110e54e80..3d265442d25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -195,7 +195,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -236,6 +235,7 @@ import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -316,8 +316,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   NameNodeMXBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
-  private final BlockIdManager blockIdManager;
-
   boolean isAuditEnabled() {
     return (!isDefaultAuditLogger || auditLog.isInfoEnabled())
         && !auditLoggers.isEmpty();
@@ -557,7 +555,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void clear() {
     dir.reset();
     dtSecretManager.reset();
-    blockIdManager.clear();
     leaseManager.removeAllLeases();
     snapshotManager.clearSnapshottableDirs();
     cacheManager.clear();
@@ -570,8 +567,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   LeaseManager getLeaseManager() {
     return leaseManager;
   }
-
-  @Override
+
   public boolean isHaEnabled() {
     return haEnabled;
   }
@@ -728,9 +724,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       }
 
       // block manager needs the haEnabled initialized
-      this.blockManager = new BlockManager(this, conf);
+      this.blockManager = new BlockManager(this, haEnabled, conf);
       this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
-      this.blockIdManager = new BlockIdManager(blockManager);
 
       // Get the checksum type from config
       String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY,
@@ -1253,8 +1248,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         getFSImage().editLog.close();
     }
   }
-
-  @Override
+
   public void checkOperation(OperationCategory op) throws StandbyException {
     if (haContext != null) {
       // null in some unit tests
@@ -1542,8 +1536,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   public boolean isRunning() {
     return fsRunning;
   }
-
-  @Override
+
   public boolean isInStandbyState() {
     if (haContext == null || haContext.getState() == null) {
       // We're still starting up. In this case, if HA is
@@ -1555,6 +1548,25 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return HAServiceState.STANDBY == haContext.getState().getServiceState();
   }
 
+  /**
+   * return a list of blocks & their locations on datanode whose
+   * total size is size
+   *
+   * @param datanode on which blocks are located
+   * @param size total size of blocks
+   */
+  public BlocksWithLocations getBlocks(DatanodeID datanode, long size)
+      throws IOException {
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      return getBlockManager().getBlocksWithLocations(datanode, size);
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
    * Dump all metadata into specified file
    */
@@ -3041,7 +3053,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     // start recovery of the last block for this file
     long blockRecoveryId = nextGenerationStamp(
-        blockIdManager.isLegacyBlock(lastBlock));
+        blockManager.isLegacyBlock(lastBlock));
     lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
     if(copyOnTruncate) {
       lastBlock.setGenerationStamp(blockRecoveryId);
@@ -4482,11 +4494,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Increments, logs and then returns the stamp
    */
   long nextGenerationStamp(boolean legacyBlock)
-      throws IOException, SafeModeException {
+      throws IOException {
     assert hasWriteLock();
     checkNameNodeSafeMode("Cannot get next generation stamp");
 
-    long gs = blockIdManager.nextGenerationStamp(legacyBlock);
+    long gs = blockManager.nextGenerationStamp(legacyBlock);
     if (legacyBlock) {
       getEditLog().logGenerationStampV1(gs);
     } else {
@@ -4504,8 +4516,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private long nextBlockId(boolean isStriped) throws IOException {
     assert hasWriteLock();
     checkNameNodeSafeMode("Cannot get next block ID");
-    final long blockId = isStriped ?
-        blockIdManager.nextStripedBlockId() : blockIdManager.nextContiguousBlockId();
+    final long blockId = blockManager.nextBlockId(isStriped);
     getEditLog().logAllocateBlockId(blockId);
     // NB: callers sync the log
     return blockId;
@@ -4632,7 +4643,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final INodeFile file = checkUCBlock(block, clientName);
 
     // get a new generation stamp and an access token
-    block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
+    block.setGenerationStamp(nextGenerationStamp(
+        blockManager.isLegacyBlock(block.getLocalBlock())));
 
     locatedBlock = BlockManager.newLocatedBlock(
         block, file.getLastBlock(), null, -1);
@@ -5472,10 +5484,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return blockManager;
   }
 
-  public BlockIdManager getBlockIdManager() {
-    return blockIdManager;
-  }
-
   /** @return the FSDirectory. */
   public FSDirectory getFSDirectory() {
     return dir;
@@ -5611,11 +5619,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throw it;
     }
   }
-
-  @Override
-  public boolean isGenStampInFuture(Block block) {
-    return blockIdManager.isGenStampInFuture(block);
-  }
 
   @VisibleForTesting
   public EditLogTailer getEditLogTailer() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 67f13869dd6..60366ae50a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -560,7 +560,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     }
     checkNNStartup();
     namesystem.checkSuperuserPrivilege();
-    return namesystem.getBlockManager().getBlocks(datanode, size);
+    return namesystem.getBlocks(datanode, size);
   }
 
   @Override // NamenodeProtocol
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index f2cc75bfea0..5a9e69bb8e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -41,14 +41,8 @@ public interface Namesystem extends RwLock, SafeMode {
   /** @return the block pool ID */
   String getBlockPoolId();
 
-  boolean isInStandbyState();
-
-  boolean isGenStampInFuture(Block block);
-
   BlockCollection getBlockCollection(long id);
 
-  void checkOperation(OperationCategory read) throws StandbyException;
-
   void startSecretManagerIfNecessary();
 
   /**
@@ -67,11 +61,6 @@ public interface Namesystem extends RwLock, SafeMode {
 
   HAContext getHAContext();
 
-  /**
-   * @return true if the HA is enabled else false
-   */
-  boolean isHaEnabled();
-
   /**
    * @return Whether the namenode is transitioning to active state and is in the
    *         middle of the starting active services.
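For orientation, here is a minimal sketch, not part of the patch and with an invented class and method name, of how a namesystem-side caller reaches generation stamps and block IDs once BlockIdManager is owned by BlockManager. It deliberately omits locking, safe-mode checks and edit-log records, which the real FSNamesystem#nextGenerationStamp and FSNamesystem#nextBlockId still handle.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

class BlockIdDelegationSketch {
  // Illustrative only: the BlockManager accessors used here
  // (isLegacyBlock, nextGenerationStamp, nextBlockId) are the
  // delegating methods introduced by this patch.
  static Block newBlockSketch(FSNamesystem fsn, Block oldBlock,
      boolean isStriped) throws IOException {
    BlockManager bm = fsn.getBlockManager();
    // Legacy (randomly generated) block IDs keep drawing from the V1
    // generation-stamp sequence; everything else uses V2.
    long genStamp = bm.nextGenerationStamp(bm.isLegacyBlock(oldBlock));
    // A single entry point now covers contiguous IDs and striped group IDs.
    long blockId = bm.nextBlockId(isStriped);
    return new Block(blockId, 0, genStamp);
  }
}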
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 4a74d2f0b9f..0e4e167da21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -130,7 +130,7 @@ public class TestBlockManager {
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
     Mockito.doReturn(true).when(fsn).isRunning();
-    bm = new BlockManager(fsn, conf);
+    bm = new BlockManager(fsn, false, conf);
     final String[] racks = {
         "/rackA",
         "/rackA",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 47d4a43fcb6..ade55748fb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -94,16 +94,16 @@ public class TestBlockManagerSafeMode {
     doReturn(true).when(fsn).hasWriteLock();
     doReturn(true).when(fsn).hasReadLock();
     doReturn(true).when(fsn).isRunning();
-    doReturn(true).when(fsn).isGenStampInFuture(any(Block.class));
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
 
-    bm = spy(new BlockManager(fsn, conf));
+    bm = spy(new BlockManager(fsn, false, conf));
+    doReturn(true).when(bm).isGenStampInFuture(any(Block.class));
     dn = spy(bm.getDatanodeManager());
     Whitebox.setInternalState(bm, "datanodeManager", dn);
     // the datanode threshold is always met
     when(dn.getNumLiveDataNodes()).thenReturn(DATANODE_NUM);
 
-    bmSafeMode = new BlockManagerSafeMode(bm, fsn, conf);
+    bmSafeMode = new BlockManagerSafeMode(bm, fsn, false, conf);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 518a35984b4..32596127658 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1315,7 +1315,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     FSNamesystem mockNS = mock(FSNamesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
     when(mockNS.hasReadLock()).thenReturn(true);
-    BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
+    BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
     BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
@@ -1365,7 +1365,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     Namesystem mockNS = mock(Namesystem.class);
     when(mockNS.hasWriteLock()).thenReturn(true);
 
-    BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
+    BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
     long blkID1 = ThreadLocalRandom.current().nextLong();
@@ -1437,7 +1437,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     Namesystem mockNS = mock(Namesystem.class);
     when(mockNS.hasReadLock()).thenReturn(true);
-    BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
+    BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
     BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index 648745c3480..8d1bbad36d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -82,8 +82,8 @@ public class TestSequentialBlockGroupId {
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    blockGrpIdGenerator = cluster.getNamesystem().getBlockIdManager()
-        .getBlockGroupIdGenerator();
+    blockGrpIdGenerator = cluster.getNamesystem().getBlockManager()
+        .getBlockIdManager().getBlockGroupIdGenerator();
     fs.mkdirs(ecDir);
     cluster.getFileSystem().getClient()
         .setErasureCodingPolicy("/ecDir", null);
@@ -179,10 +179,10 @@ public class TestSequentialBlockGroupId {
     // collision during blockGroup Id generation
     FSNamesystem fsn = cluster.getNamesystem();
     // Replace SequentialBlockIdGenerator with a spy
-    SequentialBlockIdGenerator blockIdGenerator = spy(fsn.getBlockIdManager()
-        .getBlockIdGenerator());
-    Whitebox.setInternalState(fsn.getBlockIdManager(), "blockIdGenerator",
-        blockIdGenerator);
+    SequentialBlockIdGenerator blockIdGenerator = spy(fsn.getBlockManager()
+        .getBlockIdManager().getBlockIdGenerator());
+    Whitebox.setInternalState(fsn.getBlockManager().getBlockIdManager(),
+        "blockIdGenerator", blockIdGenerator);
     SequentialBlockIdGenerator spySequentialBlockIdGenerator = new SequentialBlockIdGenerator(
         null) {
       @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
index 82352949c77..e612ea95066 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
@@ -116,8 +116,8 @@ public class TestSequentialBlockId {
 
     // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
-    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
-        .getBlockIdGenerator();
+    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockManager()
+        .getBlockIdManager().getBlockIdGenerator();
     blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
 
     // Trigger collisions by creating a new file.
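As a rough illustration of the new test wiring, the helper class below is hypothetical; the constructor arguments and the spy-based stub mirror the TestBlockManagerSafeMode changes above.

package org.apache.hadoop.hdfs.server.blockmanagement;

import static org.mockito.Mockito.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

class BlockManagerTestWiringSketch {
  // Hypothetical helper: builds a BlockManager the way the updated tests do.
  // (As in TestBlockManagerSafeMode, NameNode.initMetrics() may need to be
  // called beforehand so that safe-mode metrics can register.)
  static BlockManager newSpiedBlockManager() throws Exception {
    Configuration conf = new HdfsConfiguration();
    FSNamesystem fsn = mock(FSNamesystem.class);
    when(fsn.hasWriteLock()).thenReturn(true);
    when(fsn.hasReadLock()).thenReturn(true);
    when(fsn.isRunning()).thenReturn(true);

    // haEnabled is now an explicit constructor argument instead of being
    // read from Namesystem#isHaEnabled().
    BlockManager bm = spy(new BlockManager(fsn, false, conf));
    // isGenStampInFuture moved from Namesystem to BlockManager, so the stub
    // is installed on the BlockManager spy rather than on the mocked fsn.
    doReturn(true).when(bm).isGenStampInFuture(any(Block.class));
    return bm;
  }
}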
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 56846af8769..f2c03306b34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1020,7 +1020,7 @@ public class TestFileTruncate {
     assertThat(truncateBlock.getNumBytes(), is(oldBlock.getNumBytes()));
     assertThat(truncateBlock.getGenerationStamp(),
-        is(fsn.getBlockIdManager().getGenerationStampV2()));
+        is(fsn.getBlockManager().getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
     long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature()
@@ -1054,7 +1054,7 @@ public class TestFileTruncate {
     assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(), is(true));
     assertThat(truncateBlock.getGenerationStamp(),
-        is(fsn.getBlockIdManager().getGenerationStampV2()));
+        is(fsn.getBlockManager().getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
     long blockRecoveryId = file.getLastBlock().getUnderConstructionFeature()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 6033642354f..4d37d619dfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -517,8 +517,8 @@ public class TestSaveNamespace {
     FSNamesystem spyFsn = spy(fsn);
     final FSNamesystem finalFsn = spyFsn;
     DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
-    BlockIdManager bid = spy(spyFsn.getBlockIdManager());
-    Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
+    BlockIdManager bid = spy(spyFsn.getBlockManager().getBlockIdManager());
+    Whitebox.setInternalState(finalFsn.getBlockManager(), "blockIdManager", bid);
     doAnswer(delayer).when(bid).getGenerationStampV2();
     ExecutorService pool = Executors.newFixedThreadPool(2);
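Finally, referring back to the FSImageFormat and FSEditLogLoader hunks earlier in the patch, here is a hypothetical loader fragment (class and method names invented) sketching how persisted generation-stamp state is now restored through BlockManager#getBlockIdManager() rather than a BlockIdManager field on FSNamesystem.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

class GenStampRestoreSketch {
  // Illustrative only: mirrors the setter calls made while loading an
  // fsimage or replaying OP_SET_GENSTAMP_V1/V2 and OP_ALLOCATE_BLOCK_ID.
  static void restore(FSNamesystem fsn, long genStampV1, long genStampV2,
      long genStampV1Limit, long lastAllocatedContiguousBlockId) {
    final BlockIdManager blockIdManager =
        fsn.getBlockManager().getBlockIdManager();
    blockIdManager.setGenerationStampV1(genStampV1);
    blockIdManager.setGenerationStampV2(genStampV2);
    blockIdManager.setGenerationStampV1Limit(genStampV1Limit);
    blockIdManager.setLastAllocatedContiguousBlockId(lastAllocatedContiguousBlockId);
  }
}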