diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt
index f8b69e53029..586d1dd1058 100644
--- a/hdfs/CHANGES.txt
+++ b/hdfs/CHANGES.txt
@@ -590,6 +590,9 @@ Trunk (unreleased changes)
     HDFS-2116. Use Mokito in TestStreamFile and TestByteRangeInputStream.
     (Plamen Jeliazkov via shv)
 
+    HDFS-2112. Move ReplicationMonitor to block management.  (Uma Maheswara
+    Rao G via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c9f3826c881..6879bb755ee 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.Daemon;
 
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
@@ -101,6 +102,9 @@ public class BlockManager {
     return excessBlocksCount;
   }
 
+  /** replicationRecheckInterval is how often the namenode checks for new replication work */
+  private final long replicationRecheckInterval;
+
   /**
    * Mapping: Block -> { INode, datanodes, self ref }
    * Updated only in response to client-sent information.
@@ -108,7 +112,10 @@ public class BlockManager {
    */
   public final BlocksMap blocksMap;
 
   private final DatanodeManager datanodeManager;
-
+
+  /** Replication thread. */
+  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -162,7 +169,6 @@ public class BlockManager {
   public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
     namesystem = fsn;
     datanodeManager = new DatanodeManager(fsn, conf);
-
     blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR);
     blockplacement = BlockPlacementPolicy.getInstance(
         conf, namesystem, datanodeManager.getNetworkTopology());
@@ -198,22 +204,29 @@ public class BlockManager {
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
     this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ?
         false : true;
+
+    this.replicationRecheckInterval =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
 
     FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
     FSNamesystem.LOG.info("maxReplication = " + maxReplication);
     FSNamesystem.LOG.info("minReplication = " + minReplication);
     FSNamesystem.LOG.info("maxReplicationStreams = " + maxReplicationStreams);
     FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
+    FSNamesystem.LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
   }
 
   public void activate(Configuration conf) {
     pendingReplications.start();
     datanodeManager.activate(conf);
+    this.replicationThread.start();
   }
 
   public void close() {
     if (pendingReplications != null) pendingReplications.stop();
     blocksMap.close();
     datanodeManager.close();
+    if (replicationThread != null) replicationThread.interrupt();
   }
 
   /** @return the datanodeManager */
@@ -2248,4 +2261,72 @@ public class BlockManager {
       processOverReplicatedBlocksOnReCommission(node);
     }
   }
+
+  /**
+   * Periodically calls computeReplicationWork().
+   */
+  private class ReplicationMonitor implements Runnable {
+    static final int INVALIDATE_WORK_PCT_PER_ITERATION = 32;
+    static final float REPLICATION_WORK_MULTIPLIER_PER_ITERATION = 2;
+
+    @Override
+    public void run() {
+      while (namesystem.isRunning()) {
+        try {
+          computeDatanodeWork();
+          processPendingReplications();
+          Thread.sleep(replicationRecheckInterval);
+        } catch (InterruptedException ie) {
+          LOG.warn("ReplicationMonitor thread received InterruptedException.", ie);
+          break;
+        } catch (IOException ie) {
+          LOG.warn("ReplicationMonitor thread received exception.", ie);
+        } catch (Throwable t) {
+          LOG.warn("ReplicationMonitor thread received Runtime exception.", t);
+          Runtime.getRuntime().exit(-1);
+        }
+      }
+    }
+  }
+
+
+  /**
+   * Compute block replication and block invalidation work that can be scheduled
+   * on data-nodes. The datanode will be informed of this work at the next
+   * heartbeat.
+   *
+   * @return number of blocks scheduled for replication or removal.
+   * @throws IOException
+   */
+  int computeDatanodeWork() throws IOException {
+    int workFound = 0;
+    int blocksToProcess = 0;
+    int nodesToProcess = 0;
+    // Blocks should not be replicated or removed if in safe mode.
+    // It's OK to check safe mode here w/o holding lock, in the worst
+    // case extra replications will be scheduled, and these will get
+    // fixed up later.
+    if (namesystem.isInSafeMode())
+      return workFound;
+
+    synchronized (namesystem.heartbeats) {
+      blocksToProcess = (int) (namesystem.heartbeats.size()
+          * ReplicationMonitor.REPLICATION_WORK_MULTIPLIER_PER_ITERATION);
+      nodesToProcess = (int) Math.ceil((double) namesystem.heartbeats.size()
+          * ReplicationMonitor.INVALIDATE_WORK_PCT_PER_ITERATION / 100);
+    }
+
+    workFound = this.computeReplicationWork(blocksToProcess);
+
+    // Update FSNamesystemMetrics counters
+    namesystem.writeLock();
+    try {
+      this.updateState();
+      this.scheduledReplicationBlocksCount = workFound;
+    } finally {
+      namesystem.writeUnlock();
+    }
+    workFound += this.computeInvalidateWork(nodesToProcess);
+    return workFound;
+  }
 }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 3dc623477e7..4e88e4daddd 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -35,11 +35,11 @@ class DecommissionManager {
   static final Log LOG = LogFactory.getLog(DecommissionManager.class);
 
   private final FSNamesystem fsnamesystem;
-  private final BlockManager blockmanager;
+  private final BlockManager blockManager;
 
   DecommissionManager(FSNamesystem namesystem) {
     this.fsnamesystem = namesystem;
-    this.blockmanager = fsnamesystem.getBlockManager();
+    this.blockManager = fsnamesystem.getBlockManager();
   }
 
   /** Periodically check decommission status. */
@@ -90,7 +90,7 @@ class DecommissionManager {
 
       if (d.isDecommissionInProgress()) {
         try {
-          blockmanager.checkDecommissionStateInternal(d);
+          blockManager.checkDecommissionStateInternal(d);
         } catch(Exception e) {
           LOG.warn("entry=" + entry, e);
         }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 1d82e2bdcd5..6ac35efa103 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -154,7 +154,7 @@ public class FSDirectory implements Closeable {
   }
 
   private BlockManager getBlockManager() {
-    return getFSNamesystem().blockManager;
+    return getFSNamesystem().getBlockManager();
   }
 
   void loadFSImage(Collection<URI> dataDirs,
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 70601c75901..71a9bccd2b6 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -233,7 +233,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   // Stores the correct file name hierarchy
   //
   public FSDirectory dir;
-  BlockManager blockManager;
+  private BlockManager blockManager;
 
   // Block pool ID used by this namenode
   String blockPoolId;
@@ -280,7 +280,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   Daemon hbthread = null;   // HeartbeatMonitor thread
   public Daemon lmthread = null;   // LeaseMonitor thread
   Daemon smmthread = null;  // SafeModeMonitor thread
-  public Daemon replthread = null;  // Replication thread
+
   Daemon nnrmthread = null; // NamenodeResourceMonitor thread
 
   private volatile boolean hasResourcesAvailable = false;
@@ -292,8 +292,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   // heartbeatExpireInterval is how long namenode waits for datanode to report
   // heartbeat
   private long heartbeatExpireInterval;
-  //replicationRecheckInterval is how often namenode checks for new replication work
-  private long replicationRecheckInterval;
   //resourceRecheckInterval is how often namenode checks for the disk space availability
   private long resourceRecheckInterval;
 
@@ -387,10 +385,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     blockManager.activate(conf);
     this.hbthread = new Daemon(new HeartbeatMonitor());
     this.lmthread = new Daemon(leaseManager.new Monitor());
-    this.replthread = new Daemon(new ReplicationMonitor());
+
     hbthread.start();
     lmthread.start();
-    replthread.start();
 
     this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
     nnrmthread.start();
@@ -524,9 +521,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
         + 10 * heartbeatInterval;
-    this.replicationRecheckInterval =
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
+
     this.serverDefaults = new FsServerDefaults(
         conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE),
         conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BYTES_PER_CHECKSUM),
@@ -595,7 +590,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     try {
       if (blockManager != null) blockManager.close();
       if (hbthread != null) hbthread.interrupt();
-      if (replthread != null) replthread.interrupt();
       if (smmthread != null) smmthread.interrupt();
       if (dtSecretManager != null) dtSecretManager.stopThreads();
       if (nnrmthread != null) nnrmthread.interrupt();
@@ -3009,77 +3003,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     }
   }
 
-  /**
-   * Periodically calls computeReplicationWork().
-   */
-  class ReplicationMonitor implements Runnable {
-    static final int INVALIDATE_WORK_PCT_PER_ITERATION = 32;
-    static final float REPLICATION_WORK_MULTIPLIER_PER_ITERATION = 2;
-    public void run() {
-      while (fsRunning) {
-        try {
-          computeDatanodeWork();
-          blockManager.processPendingReplications();
-          Thread.sleep(replicationRecheckInterval);
-        } catch (InterruptedException ie) {
-          LOG.warn("ReplicationMonitor thread received InterruptedException." + ie);
-          break;
-        } catch (IOException ie) {
-          LOG.warn("ReplicationMonitor thread received exception. " + ie);
-        } catch (Throwable t) {
-          LOG.warn("ReplicationMonitor thread received Runtime exception. " + t);
-          Runtime.getRuntime().exit(-1);
-        }
-      }
-    }
-  }
-
-  /////////////////////////////////////////////////////////
-  //
-  // These methods are called by the Namenode system, to see
-  // if there is any work for registered datanodes.
-  //
-  /////////////////////////////////////////////////////////
-  /**
-   * Compute block replication and block invalidation work
-   * that can be scheduled on data-nodes.
-   * The datanode will be informed of this work at the next heartbeat.
-   *
-   * @return number of blocks scheduled for replication or removal.
-   * @throws IOException
-   */
-  public int computeDatanodeWork() throws IOException {
-    int workFound = 0;
-    int blocksToProcess = 0;
-    int nodesToProcess = 0;
-    // Blocks should not be replicated or removed if in safe mode.
-    // It's OK to check safe mode here w/o holding lock, in the worst
-    // case extra replications will be scheduled, and these will get
-    // fixed up later.
-    if (isInSafeMode())
-      return workFound;
-
-    synchronized (heartbeats) {
-      blocksToProcess = (int)(heartbeats.size()
-          * ReplicationMonitor.REPLICATION_WORK_MULTIPLIER_PER_ITERATION);
-      nodesToProcess = (int)Math.ceil((double)heartbeats.size()
-          * ReplicationMonitor.INVALIDATE_WORK_PCT_PER_ITERATION / 100);
-    }
-
-    workFound = blockManager.computeReplicationWork(blocksToProcess);
-
-    // Update FSNamesystemMetrics counters
-    writeLock();
-    try {
-      blockManager.updateState();
-      blockManager.scheduledReplicationBlocksCount = workFound;
-    } finally {
-      writeUnlock();
-    }
-    workFound += blockManager.computeInvalidateWork(nodesToProcess);
-    return workFound;
-  }
-
+
   public void setNodeReplicationLimit(int limit) {
     blockManager.maxReplicationStreams = limit;
   }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 7135806a8d9..e2a1f158cf6 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -724,7 +724,7 @@ class NamenodeJspHelper {
         this.inode = null;
       } else {
         this.block = new Block(blockId);
-        this.inode = fsn.blockManager.getINode(block);
+        this.inode = fsn.getBlockManager().getINode(block);
       }
     }
 
@@ -799,9 +799,9 @@ class NamenodeJspHelper {
 
       doc.startTag("replicas");
 
-      if (fsn.blockManager.blocksMap.contains(block)) {
+      if (fsn.getBlockManager().blocksMap.contains(block)) {
         Iterator<DatanodeDescriptor> it =
-            fsn.blockManager.blocksMap.nodeIterator(block);
+            fsn.getBlockManager().blocksMap.nodeIterator(block);
 
         while (it.hasNext()) {
           doc.startTag("replica");
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 7b3d167738d..592705afc19 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.io.IOException;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -24,7 +25,7 @@ import java.util.Set;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.Daemon;
 
 public class BlockManagerTestUtil {
   /**
@@ -48,12 +49,12 @@ public class BlockManagerTestUtil {
    * decommissioning/decommissioned nodes are not counted. corrupt replicas
    * are also ignored
    */
-  private static int getNumberOfRacks(final BlockManager blockmanager,
+  private static int getNumberOfRacks(final BlockManager blockManager,
       final Block b) {
     final Set<String> rackSet = new HashSet<String>(0);
     final Collection<DatanodeDescriptor> corruptNodes =
-        blockmanager.corruptReplicas.getNodes(b);
-    for (Iterator<DatanodeDescriptor> it = blockmanager.blocksMap.nodeIterator(b);
+        getCorruptReplicas(blockManager).getNodes(b);
+    for (Iterator<DatanodeDescriptor> it = blockManager.blocksMap.nodeIterator(b);
         it.hasNext();) {
       DatanodeDescriptor cur = it.next();
       if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -68,4 +69,33 @@ public class BlockManagerTestUtil {
     return rackSet.size();
   }
 
+  /**
+   * @param blockManager
+   * @return replication monitor thread instance from block manager.
+   */
+  public static Daemon getReplicationThread(final BlockManager blockManager)
+  {
+    return blockManager.replicationThread;
+  }
+
+  /**
+   * @param blockManager
+   * @return corruptReplicas from block manager
+   */
+  public static CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager) {
+    return blockManager.corruptReplicas;
+
+  }
+
+  /**
+   * @param blockManager
+   * @return computed block replication and block invalidation work that can be
+   *         scheduled on data-nodes.
+   * @throws IOException
+   */
+  public static int getComputedDatanodeWork(final BlockManager blockManager)
+      throws IOException
+  {
+    return blockManager.computeDatanodeWork();
+  }
+
 }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 9f9daec9984..5f281aefe80 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -220,7 +221,8 @@ public class TestBlockReport {
     cluster.getNameNode().blockReport(dnR, poolId,
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
 
-    cluster.getNamesystem().computeDatanodeWork();
+    BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
+        .getBlockManager());
 
     printStats();
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index dbbb345dacd..9b3789c00eb 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -191,7 +192,7 @@ public class TestDataNodeVolumeFailure {
     // now check the number of under-replicated blocks
     FSNamesystem fsn = cluster.getNamesystem();
     // force update of all the metric counts by calling computeDatanodeWork
-    fsn.computeDatanodeWork();
+    BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
     // get all the counts
     long underRepl = fsn.getUnderReplicatedBlocks();
     long pendRepl = fsn.getPendingReplicationBlocks();
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 484dc72a83f..d90ee5eac1f 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -1111,9 +1112,11 @@ public class NNThroughputBenchmark {
       // start data-nodes; create a bunch of files; generate block reports.
       blockReportObject.generateInputs(ignore);
       // stop replication monitor
-      namesystem.replthread.interrupt();
+      BlockManagerTestUtil.getReplicationThread(namesystem.getBlockManager())
+          .interrupt();
       try {
-        namesystem.replthread.join();
+        BlockManagerTestUtil.getReplicationThread(namesystem.getBlockManager())
+            .join();
       } catch(InterruptedException ei) {
         return;
       }
@@ -1156,7 +1159,8 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       long start = System.currentTimeMillis();
       // compute data-node work
-      int work = nameNode.getNamesystem().computeDatanodeWork();
+      int work = BlockManagerTestUtil.getComputedDatanodeWork(nameNode
+          .getNamesystem().getBlockManager());
       long end = System.currentTimeMillis();
       numPendingBlocks += work;
       if(work == 0)
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 39dde0a16b6..2dbe281b0c1 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -50,7 +50,7 @@ public class NameNodeAdapter {
    * @param namenode to proxy the invocation to
    */
   public static void refreshBlockCounts(NameNode namenode) {
-    namenode.getNamesystem().blockManager.updateState();
+    namenode.getNamesystem().getBlockManager().updateState();
   }
 
   /**
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index cc1781490a1..beb70bd9b1d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -100,7 +100,7 @@ public class TestBlockUnderConstruction {
       assertTrue("Block is not complete: " + curBlock,
           curBlock.isComplete());
       assertTrue("Block is not in BlocksMap: " + curBlock,
-          ns.blockManager.getStoredBlock(curBlock) == curBlock);
+          ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
     }
 
     // the penultimate block is either complete or
@@ -115,7 +115,7 @@ public class TestBlockUnderConstruction {
 
           (curBlock.getBlockUCState() == BlockUCState.COMMITTED)));
       assertTrue("Block is not in BlocksMap: " + curBlock,
-          ns.blockManager.getStoredBlock(curBlock) == curBlock);
+          ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
     }
 
     // The last block is complete if the file is closed.
@@ -126,7 +126,7 @@ public class TestBlockUnderConstruction {
           curBlock.isComplete());
     }
     assertTrue("Block is not in BlocksMap: " + curBlock,
-        ns.blockManager.getStoredBlock(curBlock) == curBlock);
+        ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
   }
 
   @Test
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
index 95eebd30f60..0299cc55954 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
@@ -51,23 +51,23 @@ public class TestComputeInvalidateWork extends TestCase {
         for(int j=0; j<3*namesystem.blockInvalidateLimit+1; j++) {
           Block block = new Block(i*(namesystem.blockInvalidateLimit+1)+j, 0,
               GenerationStamp.FIRST_VALID_STAMP);
-          namesystem.blockManager.addToInvalidates(block, nodes[i]);
+          namesystem.getBlockManager().addToInvalidates(block, nodes[i]);
         }
       }
 
       assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES,
-          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES+1));
+          namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES+1));
       assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES,
-          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES));
+          namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES));
       assertEquals(namesystem.blockInvalidateLimit*(NUM_OF_DATANODES-1),
-          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES-1));
-      int workCount = namesystem.blockManager.computeInvalidateWork(1);
+          namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES-1));
+      int workCount = namesystem.getBlockManager().computeInvalidateWork(1);
       if (workCount == 1) {
         assertEquals(namesystem.blockInvalidateLimit+1,
-            namesystem.blockManager.computeInvalidateWork(2));
+            namesystem.getBlockManager().computeInvalidateWork(2));
       } else {
         assertEquals(workCount, namesystem.blockInvalidateLimit);
-        assertEquals(2, namesystem.blockManager.computeInvalidateWork(2));
+        assertEquals(2, namesystem.getBlockManager().computeInvalidateWork(2));
       }
     } finally {
       namesystem.writeUnlock();
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index 8814f83d190..e9966de4f6d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -78,7 +78,7 @@ public class TestLargeDirectoryDelete {
     Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
     FSNamesystem namesystem = mc.getNamesystem();
     Assert.assertNotNull("Null Namesystem in cluster", namesystem);
-    Assert.assertNotNull("Null Namesystem.blockmanager", namesystem.blockManager);
+    Assert.assertNotNull("Null Namesystem.blockmanager", namesystem.getBlockManager());
    return (int) namesystem.getBlocksTotal();
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
index 16047f72a1a..2f375bcf864 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
@@ -102,12 +102,12 @@ public class TestNodeCount extends TestCase {
       }
 
       // find out a non-excess node
-      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap
+      Iterator<DatanodeDescriptor> iter = namesystem.getBlockManager().blocksMap
           .nodeIterator(block.getLocalBlock());
       DatanodeDescriptor nonExcessDN = null;
       while (iter.hasNext()) {
         DatanodeDescriptor dn = iter.next();
-        Collection<Block> blocks = namesystem.blockManager.excessReplicateMap.get(dn.getStorageID());
+        Collection<Block> blocks = namesystem.getBlockManager().excessReplicateMap.get(dn.getStorageID());
         if (blocks == null || !blocks.contains(block) ) {
           nonExcessDN = dn;
           break;
@@ -184,7 +184,7 @@ public class TestNodeCount extends TestCase {
     namesystem.readLock();
     try {
       lastBlock = block;
-      lastNum = namesystem.blockManager.countNodes(block);
+      lastNum = namesystem.getBlockManager().countNodes(block);
       return lastNum;
     }
     finally {
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
index 7f99bd7d2bf..c4b91c21c33 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
@@ -100,7 +100,7 @@ public class TestOverReplicatedBlocks extends TestCase {
 
           // corrupt one won't be chosen to be excess one
           // without 4910 the number of live replicas would be 0: block gets lost
-          assertEquals(1, namesystem.blockManager.countNodes(block.getLocalBlock())
+          assertEquals(1, namesystem.getBlockManager().countNodes(block.getLocalBlock())
               .liveReplicas());
         }
       } finally {
diff --git a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
index ca91724abfe..508a0d2d979 100644
--- a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
+++ b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
@@ -455,7 +455,7 @@ public class TestNNLeaseRecovery {
     fsn.leaseManager.addLease("mock-lease", file.toString());
     if (setStoredBlock) {
       when(b1.getINode()).thenReturn(iNFmock);
-      fsn.blockManager.blocksMap.addINode(b1, iNFmock);
+      fsn.getBlockManager().blocksMap.addINode(b1, iNFmock);
     }
 
     when(fsDir.getFileINode(anyString())).thenReturn(iNFmock);
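
Note: for reference, a minimal sketch (not part of the patch) of how test code can now drive the relocated replication logic through the BlockManagerTestUtil helper added above, instead of calling the removed FSNamesystem.computeDatanodeWork() directly. Only getBlockManager() and BlockManagerTestUtil.getComputedDatanodeWork() come from this patch; the class name below and the running MiniDFSCluster named "cluster" are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;

public class ReplicationWorkSketch {
  /**
   * Trigger one round of replication/invalidation scheduling, as the
   * ReplicationMonitor daemon would do on its next wake-up.
   * @return number of blocks scheduled for replication or removal.
   */
  static int triggerReplicationWork(MiniDFSCluster cluster) throws IOException {
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    return BlockManagerTestUtil.getComputedDatanodeWork(bm);
  }
}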