From 09b6f98de431628c80bc8a6faf0070eeaf72ff2a Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Wed, 29 Jun 2011 01:31:15 +0000 Subject: [PATCH] HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new package o.a.h.h.s.blockmanagement. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140939 13f79535-47bb-0310-9956-ffa450edef68 --- hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/balancer/Balancer.java | 4 +- .../BlockInfo.java | 27 +-- .../BlockInfoUnderConstruction.java | 27 ++- .../BlockManager.java | 176 ++++++++++-------- .../BlockPlacementPolicy.java | 10 +- .../BlockPlacementPolicyDefault.java | 7 +- .../BlocksMap.java | 11 +- .../CorruptReplicasMap.java | 3 +- .../DatanodeDescriptor.java | 72 +++---- .../blockmanagement/NumberReplicas.java | 57 ++++++ .../PendingReplicationBlocks.java | 4 +- .../UnderReplicatedBlocks.java | 11 +- .../hadoop/hdfs/server/common/JspHelper.java | 5 +- .../server/namenode/DecommissionManager.java | 1 + .../hdfs/server/namenode/FSDirectory.java | 29 +-- .../hdfs/server/namenode/FSEditLogLoader.java | 26 ++- .../hdfs/server/namenode/FSEditLogOp.java | 2 + .../hdfs/server/namenode/FSImageFormat.java | 1 + .../server/namenode/FSImageSerialization.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 102 ++++------ .../hdfs/server/namenode/Host2NodesMap.java | 5 +- .../hadoop/hdfs/server/namenode/INode.java | 10 +- .../hdfs/server/namenode/INodeFile.java | 14 +- .../namenode/INodeFileUnderConstruction.java | 15 +- .../hdfs/server/namenode/LeaseManager.java | 2 +- .../hadoop/hdfs/server/namenode/NameNode.java | 2 +- .../hdfs/server/namenode/NamenodeFsck.java | 6 +- .../server/namenode/NamenodeJspHelper.java | 8 +- .../hdfs/server/protocol/BlockCommand.java | 11 +- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +- .../hdfs/TestBlocksScheduledCounter.java | 2 +- .../TestCorruptReplicaInfo.java | 14 +- .../TestDatanodeDescriptor.java | 2 +- .../TestHeartbeatHandling.java | 3 +- .../TestPendingReplication.java | 2 +- .../TestReplicationPolicy.java | 18 +- .../TestUnderReplicatedBlocks.java | 7 +- .../TestDataNodeVolumeFailureReporting.java | 29 +-- .../hdfs/server/namenode/CreateEditsLog.java | 1 + .../hdfs/server/namenode/NameNodeAdapter.java | 3 +- .../namenode/TestBlockUnderConstruction.java | 2 + .../namenode/TestComputeInvalidateWork.java | 5 +- .../server/namenode/TestDeadDatanode.java | 2 +- .../namenode/TestDecommissioningStatus.java | 16 +- .../server/namenode/TestHost2NodesMap.java | 5 +- .../namenode/TestNamenodeCapacityReport.java | 16 +- .../hdfs/server/namenode/TestNodeCount.java | 7 +- .../namenode/TestOverReplicatedBlocks.java | 9 +- .../namenode/metrics/TestNameNodeMetrics.java | 17 +- .../hadoop/net/TestNetworkTopology.java | 2 +- .../server/namenode/TestNNLeaseRecovery.java | 20 +- hdfs/src/webapps/hdfs/block_info_xml.jsp | 10 - 53 files changed, 494 insertions(+), 355 deletions(-) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/BlockInfo.java (93%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/BlockInfoUnderConstruction.java (91%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/BlockManager.java (94%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/BlockPlacementPolicy.java (96%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/BlockPlacementPolicyDefault.java (98%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => 
blockmanagement}/BlocksMap.java (95%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/CorruptReplicasMap.java (98%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/DatanodeDescriptor.java (89%) create mode 100644 hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/PendingReplicationBlocks.java (98%) rename hdfs/src/java/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/UnderReplicatedBlocks.java (96%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestCorruptReplicaInfo.java (93%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestDatanodeDescriptor.java (97%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestHeartbeatHandling.java (97%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestPendingReplication.java (98%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestReplicationPolicy.java (99%) rename hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/{namenode => blockmanagement}/TestUnderReplicatedBlocks.java (95%) diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt index 2b025b921c3..c98f7220512 100644 --- a/hdfs/CHANGES.txt +++ b/hdfs/CHANGES.txt @@ -534,6 +534,9 @@ Trunk (unreleased changes) HDFS-2110. StreamFile and ByteRangeInputStream cleanup. (eli) + HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new + package o.a.h.h.s.blockmanagement. (szetszwo) + OPTIMIZATIONS HDFS-1458. Improve checkpoint performance by avoiding unnecessary image diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 08af0fb9ff5..71a8b838124 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Util; -import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy; -import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.io.IOUtils; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java similarity index 93% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 1c6deb90d13..5f10f2ae754 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java 
@@ -15,16 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.LightWeightGSet; /** * Internal class for block metadata. */ -class BlockInfo extends Block implements LightWeightGSet.LinkedElement { +public class BlockInfo extends Block implements LightWeightGSet.LinkedElement { private INodeFile inode; /** For implementing {@link LightWeightGSet.LinkedElement} interface */ @@ -44,12 +45,12 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * Construct an entry for blocksmap * @param replication the block's replication factor */ - protected BlockInfo(int replication) { + public BlockInfo(int replication) { this.triplets = new Object[3*replication]; this.inode = null; } - protected BlockInfo(Block blk, int replication) { + public BlockInfo(Block blk, int replication) { super(blk); this.triplets = new Object[3*replication]; this.inode = null; @@ -65,11 +66,11 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { this.inode = from.inode; } - INodeFile getINode() { + public INodeFile getINode() { return inode; } - void setINode(INodeFile inode) { + public void setINode(INodeFile inode) { this.inode = inode; } @@ -162,7 +163,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { /** * Add data-node this block belongs to. */ - boolean addNode(DatanodeDescriptor node) { + public boolean addNode(DatanodeDescriptor node) { if(findDatanode(node) >= 0) // the node is already there return false; // find the last null node @@ -176,7 +177,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { /** * Remove data-node from the block. */ - boolean removeNode(DatanodeDescriptor node) { + public boolean removeNode(DatanodeDescriptor node) { int dnIndex = findDatanode(node); if(dnIndex < 0) // the node is not found return false; @@ -218,7 +219,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * If the head is null then form a new list. * @return current block as the new head of the list. */ - BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) { + public BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) { int dnIndex = this.findDatanode(dn); assert dnIndex >= 0 : "Data node is not found: current"; assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : @@ -238,7 +239,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * @return the new head of the list or null if the list becomes * empty after deletion. */ - BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) { + public BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) { if(head == null) return null; int dnIndex = this.findDatanode(dn); @@ -284,7 +285,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * to {@link BlockInfoUnderConstruction}. 
* @return {@link BlockUCState#COMPLETE} */ - BlockUCState getBlockUCState() { + public BlockUCState getBlockUCState() { return BlockUCState.COMPLETE; } @@ -293,7 +294,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * * @return true if the state of the block is {@link BlockUCState#COMPLETE} */ - boolean isComplete() { + public boolean isComplete() { return getBlockUCState().equals(BlockUCState.COMPLETE); } @@ -302,7 +303,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement { * * @return BlockInfoUnderConstruction - an under construction block. */ - BlockInfoUnderConstruction convertToBlockUnderConstruction( + public BlockInfoUnderConstruction convertToBlockUnderConstruction( BlockUCState s, DatanodeDescriptor[] targets) { if(isComplete()) { return new BlockInfoUnderConstruction( diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java similarity index 91% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index 1891d75eea4..c3f676e3d2d 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import java.util.ArrayList; @@ -24,12 +24,13 @@ import java.util.List; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.namenode.NameNode; /** * Represents a block that is currently being constructed.
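The BlockInfo hunks above expose a data structure that is easy to miss in diff form: the triplets array allocates three object slots per expected replica, holding the replica's DatanodeDescriptor plus references to the previous and next BlockInfo in that datanode's own doubly-linked chain of blocks, which listInsert and listRemove maintain. A minimal standalone model of the slot layout, using plain Object handles for datanodes; this is an illustration of the scheme, not the Hadoop implementation:

    /**
     * Simplified model of BlockInfo's triplets layout: slots 3*i, 3*i+1 and
     * 3*i+2 hold the i-th replica's datanode and the previous/next block in
     * that datanode's block list.
     */
    class TripletsModel {
      private final Object[] triplets;

      TripletsModel(int replication) {
        this.triplets = new Object[3 * replication];
      }

      /** @return the slot-group index of the datanode, or -1 if absent. */
      int findDatanode(Object dn) {
        for (int i = 0; i * 3 < triplets.length; i++) {
          if (triplets[3 * i] == null) break;        // end of stored replicas
          if (dn.equals(triplets[3 * i])) return i;
        }
        return -1;
      }

      /** Add a datanode into the first free slot group, as addNode does. */
      boolean addNode(Object dn) {
        if (findDatanode(dn) >= 0) return false;     // node already present
        for (int i = 0; i * 3 < triplets.length; i++) {
          if (triplets[3 * i] == null) {
            triplets[3 * i] = dn;                    // prev/next links stay
            return true;                             // null until listInsert
          }
        }
        return false;                                // no free slot group
      }

      public static void main(String[] args) {
        TripletsModel b = new TripletsModel(3);
        System.out.println(b.addNode("dn1"));        // true
        System.out.println(b.addNode("dn1"));        // false: duplicate
        System.out.println(b.findDatanode("dn1"));   // 0
      }
    }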
* This is usually the last block of a file opened for write or append. */ -class BlockInfoUnderConstruction extends BlockInfo { +public class BlockInfoUnderConstruction extends BlockInfo { /** Block state. See {@link BlockUCState} */ private BlockUCState blockUCState; @@ -128,11 +129,14 @@ class BlockInfoUnderConstruction extends BlockInfo { * Create block and set its state to * {@link BlockUCState#UNDER_CONSTRUCTION}. */ - BlockInfoUnderConstruction(Block blk, int replication) { + public BlockInfoUnderConstruction(Block blk, int replication) { this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null); } - BlockInfoUnderConstruction(Block blk, int replication, + /** + * Create a block that is currently being constructed. + */ + public BlockInfoUnderConstruction(Block blk, int replication, BlockUCState state, DatanodeDescriptor[] targets) { super(blk, replication); @@ -160,7 +164,8 @@ class BlockInfoUnderConstruction extends BlockInfo { return new BlockInfo(this); } - void setExpectedLocations(DatanodeDescriptor[] targets) { + /** Set expected locations */ + public void setExpectedLocations(DatanodeDescriptor[] targets) { int numLocations = targets == null ? 0 : targets.length; this.replicas = new ArrayList(numLocations); for(int i = 0; i < numLocations; i++) @@ -172,7 +177,7 @@ class BlockInfoUnderConstruction extends BlockInfo { * Create array of expected replica locations * (as has been assigned by chooseTargets()). */ - DatanodeDescriptor[] getExpectedLocations() { + public DatanodeDescriptor[] getExpectedLocations() { int numLocations = replicas == null ? 0 : replicas.size(); DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocations]; for(int i = 0; i < numLocations; i++) @@ -180,7 +185,8 @@ class BlockInfoUnderConstruction extends BlockInfo { return locations; } - int getNumExpectedLocations() { + /** Get the number of expected locations */ + public int getNumExpectedLocations() { return replicas == null ? 0 : replicas.size(); } @@ -189,7 +195,7 @@ class BlockInfoUnderConstruction extends BlockInfo { * @see BlockUCState */ @Override // BlockInfo - BlockUCState getBlockUCState() { + public BlockUCState getBlockUCState() { return blockUCState; } @@ -197,7 +203,8 @@ class BlockInfoUnderConstruction extends BlockInfo { blockUCState = s; } - long getBlockRecoveryId() { + /** Get block recovery ID */ + public long getBlockRecoveryId() { return blockRecoveryId; } @@ -220,7 +227,7 @@ class BlockInfoUnderConstruction extends BlockInfo { * Find the first alive data-node starting from the previous primary and * make it primary. */ - void initializeBlockRecovery(long recoveryId) { + public void initializeBlockRecovery(long recoveryId) { setBlockUCState(BlockUCState.UNDER_RECOVERY); blockRecoveryId = recoveryId; if (replicas.size() == 0) { diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java similarity index 94% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index bc9babb80fb..2510245ba95 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
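With the BlockInfoUnderConstruction constructors and accessors above made public, code outside org.apache.hadoop.hdfs.server.namenode can now drive the block lifecycle directly. A small driver built only from calls visible in this patch; it assumes the patched hdfs tree on the classpath, and the numeric block values are purely illustrative:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;

    public class UcLifecycleSketch {
      public static void main(String[] args) {
        // Illustrative values: block id 1, zero bytes, generation stamp 1000.
        BlockInfo stored = new BlockInfo(new Block(1L, 0L, 1000L), 3);

        // Reopening a file for append converts its COMPLETE last block to
        // under-construction; no expected locations are known yet.
        BlockInfoUnderConstruction uc = stored.convertToBlockUnderConstruction(
            BlockUCState.UNDER_CONSTRUCTION, new DatanodeDescriptor[0]);

        System.out.println(uc.getBlockUCState());         // UNDER_CONSTRUCTION
        System.out.println(uc.isComplete());              // false
        System.out.println(uc.getNumExpectedLocations()); // 0
      }
    }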
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import java.io.PrintWriter; @@ -39,10 +39,14 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; -import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; +import org.apache.hadoop.hdfs.server.namenode.NameNode; /** * Keeps information related to the blocks stored in the Hadoop cluster. @@ -57,18 +61,43 @@ public class BlockManager { private final FSNamesystem namesystem; - volatile long pendingReplicationBlocksCount = 0L; - volatile long corruptReplicaBlocksCount = 0L; - volatile long underReplicatedBlocksCount = 0L; - volatile long scheduledReplicationBlocksCount = 0L; - volatile long excessBlocksCount = 0L; - volatile long pendingDeletionBlocksCount = 0L; + private volatile long pendingReplicationBlocksCount = 0L; + private volatile long corruptReplicaBlocksCount = 0L; + private volatile long underReplicatedBlocksCount = 0L; + public volatile long scheduledReplicationBlocksCount = 0L; + private volatile long excessBlocksCount = 0L; + private volatile long pendingDeletionBlocksCount = 0L; - // - // Mapping: Block -> { INode, datanodes, self ref } - // Updated only in response to client-sent information. - // - final BlocksMap blocksMap; + /** Used by metrics */ + public long getPendingReplicationBlocksCount() { + return pendingReplicationBlocksCount; + } + /** Used by metrics */ + public long getUnderReplicatedBlocksCount() { + return underReplicatedBlocksCount; + } + /** Used by metrics */ + public long getCorruptReplicaBlocksCount() { + return corruptReplicaBlocksCount; + } + /** Used by metrics */ + public long getScheduledReplicationBlocksCount() { + return scheduledReplicationBlocksCount; + } + /** Used by metrics */ + public long getPendingDeletionBlocksCount() { + return pendingDeletionBlocksCount; + } + /** Used by metrics */ + public long getExcessBlocksCount() { + return excessBlocksCount; + } + + /** + * Mapping: Block -> { INode, datanodes, self ref } + * Updated only in response to client-sent information. + */ + public final BlocksMap blocksMap; // // Store blocks-->datanodedescriptor(s) map of corrupt replicas @@ -90,24 +119,24 @@ public class BlockManager { // eventually remove these extras. // Mapping: StorageID -> TreeSet<Block> // - Map<String, Collection<Block>> excessReplicateMap = + public final Map<String, Collection<Block>> excessReplicateMap = new TreeMap<String, Collection<Block>>(); // // Store set of Blocks that need to be replicated 1 or more times. // We also store pending replication-orders.
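The excessReplicateMap made public above keys each datanode's excess blocks by storage ID and creates the per-node set lazily on first insert; addToExcessReplicate, further down this file, follows exactly this shape. The get-or-create idiom in isolation, as a standalone sketch with String stand-ins for Hadoop's Block:

    import java.util.Collection;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.TreeSet;

    public class ExcessMapSketch {
      // Mapping: storage ID -> blocks considered excess on that datanode.
      static final Map<String, Collection<String>> excessReplicateMap =
          new TreeMap<String, Collection<String>>();

      static void addToExcess(String storageID, String block) {
        Collection<String> excessBlocks = excessReplicateMap.get(storageID);
        if (excessBlocks == null) {
          excessBlocks = new TreeSet<String>();   // created on first use
          excessReplicateMap.put(storageID, excessBlocks);
        }
        excessBlocks.add(block);
      }

      public static void main(String[] args) {
        addToExcess("DS-1", "blk_1001");
        addToExcess("DS-1", "blk_1002");
        System.out.println(excessReplicateMap);   // {DS-1=[blk_1001, blk_1002]}
      }
    }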
// - UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); + public UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); private PendingReplicationBlocks pendingReplications; // The maximum number of replicas allowed for a block - int maxReplication; + public int maxReplication; // How many outgoing replication streams a given node should have at one time - int maxReplicationStreams; + public int maxReplicationStreams; // Minimum copies needed or else write is disallowed - int minReplication; + public int minReplication; // Default number of replicas - int defaultReplication; + public int defaultReplication; // How many entries are returned by getCorruptInodes() int maxCorruptFilesReturned; @@ -121,9 +150,9 @@ public class BlockManager { Random r = new Random(); // for block replicas placement - BlockPlacementPolicy replicator; + public BlockPlacementPolicy replicator; - BlockManager(FSNamesystem fsn, Configuration conf) throws IOException { + public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException { this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY); } @@ -178,16 +207,16 @@ public class BlockManager { FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks); } - void activate() { + public void activate() { pendingReplications.start(); } - void close() { + public void close() { if (pendingReplications != null) pendingReplications.stop(); blocksMap.close(); } - void metaSave(PrintWriter out) { + public void metaSave(PrintWriter out) { // // Dump contents of neededReplication // @@ -249,7 +278,7 @@ public class BlockManager { * @param block * @return true if the block has minimum replicas */ - boolean checkMinReplication(Block block) { + public boolean checkMinReplication(Block block) { return (countNodes(block).liveReplicas() >= minReplication); } @@ -297,7 +326,7 @@ public class BlockManager { * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. 
*/ - void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, + public void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, Block commitBlock) throws IOException { if(commitBlock == null) @@ -362,7 +391,7 @@ public class BlockManager { * @param fileINode file * @return the last block locations if the block is partial or null otherwise */ - LocatedBlock convertLastBlockToUnderConstruction( + public LocatedBlock convertLastBlockToUnderConstruction( INodeFileUnderConstruction fileINode) throws IOException { BlockInfo oldBlock = fileINode.getLastBlock(); if(oldBlock == null || @@ -393,7 +422,7 @@ public class BlockManager { /** * Get all valid locations of the block */ - ArrayList<String> getValidLocations(Block block) { + public ArrayList<String> getValidLocations(Block block) { ArrayList<String> machineSet = new ArrayList<String>(blocksMap.numNodes(block)); for(Iterator<DatanodeDescriptor> it = @@ -407,7 +436,7 @@ public class BlockManager { return machineSet; } - List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset, + public List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset, long length, int nrBlocksToReturn) throws IOException { int curBlk = 0; long curPos = 0, blkSize = 0; @@ -436,11 +465,15 @@ public class BlockManager { return results; } - /** @param needBlockToken * @return a LocatedBlock for the given block */ - LocatedBlock getBlockLocation(final BlockInfo blk, final long pos + /** @return a LocatedBlock for the given block */ + public LocatedBlock getBlockLocation(final BlockInfo blk, final long pos ) throws IOException { - if (!blk.isComplete()) { + if (blk instanceof BlockInfoUnderConstruction) { + if (blk.isComplete()) { + throw new IOException( + "blk instanceof BlockInfoUnderConstruction && blk.isComplete()" + + ", blk=" + blk); + } final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk; final DatanodeDescriptor[] locations = uc.getExpectedLocations(); return namesystem.createLocatedBlock(uc, locations, pos, false); @@ -476,7 +509,7 @@ public class BlockManager { * Check whether the replication parameter is within the range * determined by system configuration.
*/ - void verifyReplication(String src, + public void verifyReplication(String src, short replication, String clientName) throws IOException { @@ -544,7 +577,7 @@ * @param b block * @param dn datanode */ - void addToInvalidates(Block b, DatanodeInfo dn) { + public void addToInvalidates(Block b, DatanodeInfo dn) { addToInvalidates(b, dn, true); } @@ -585,7 +618,7 @@ } } - void findAndMarkBlockAsCorrupt(Block blk, + public void findAndMarkBlockAsCorrupt(Block blk, DatanodeInfo dn) throws IOException { BlockInfo storedBlock = getStoredBlock(blk); if (storedBlock == null) { @@ -668,14 +701,14 @@ } } - void updateState() { + public void updateState() { pendingReplicationBlocksCount = pendingReplications.size(); underReplicatedBlocksCount = neededReplications.size(); corruptReplicaBlocksCount = corruptReplicas.size(); } /** Return number of under-replicated but not missing blocks */ - int getUnderReplicatedNotMissingBlocks() { + public int getUnderReplicatedNotMissingBlocks() { return neededReplications.getUnderReplicatedBlockCount(); } @@ -684,7 +717,7 @@ * @param nodesToProcess number of datanodes to schedule deletion work * @return total number of blocks for deletion */ - int computeInvalidateWork(int nodesToProcess) { + public int computeInvalidateWork(int nodesToProcess) { int numOfNodes = recentInvalidateSets.size(); nodesToProcess = Math.min(numOfNodes, nodesToProcess); @@ -724,7 +757,7 @@ * * @return number of blocks scheduled for replication during this iteration. */ - int computeReplicationWork(int blocksToProcess) throws IOException { + public int computeReplicationWork(int blocksToProcess) throws IOException { // Choose the blocks to be replicated List<List<Block>> blocksToReplicate = chooseUnderReplicatedBlocks(blocksToProcess); @@ -1031,7 +1064,7 @@ * If there were any replication requests that timed out, reap them * and put them back into the neededReplication queue */ - void processPendingReplications() { + public void processPendingReplications() { Block[] timedOutItems = pendingReplications.getTimedOutBlocks(); if (timedOutItems != null) { namesystem.writeLock(); @@ -1464,7 +1497,7 @@ short fileReplication = fileINode.getReplication(); if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) { neededReplications.remove(storedBlock, numCurrentReplica, - num.decommissionedReplicas, + num.decommissionedReplicas(), fileReplication); } else { updateNeededReplications(storedBlock, curReplicaDelta, 0); } @@ -1525,7 +1558,7 @@ * For each block in the name-node verify whether it belongs to any file, * over or under replicated. Place it into the respective queue. */ - void processMisReplicatedBlocks() { + public void processMisReplicatedBlocks() { long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0; namesystem.writeLock(); try { @@ -1570,7 +1603,7 @@ * If there are any extras, call chooseExcessReplicates() to * mark them in the excessReplicateMap.
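verifyReplication, made public at the top of the hunks above, has its body elided by the diff context. A plausible reconstruction of the bounds check it performs, based on the minReplication/maxReplication fields earlier in this file; this is an assumption about elided code, not the verbatim Hadoop body:

    import java.io.IOException;

    /** Hypothetical reconstruction of the elided method body. */
    class ReplicationBoundsSketch {
      private final int minReplication;
      private final int maxReplication;

      ReplicationBoundsSketch(int min, int max) {
        this.minReplication = min;
        this.maxReplication = max;
      }

      void verifyReplication(String src, short replication, String clientName)
          throws IOException {
        String client = clientName == null ? "" : " (client " + clientName + ")";
        if (replication > maxReplication) {
          throw new IOException("file " + src + ": requested replication "
              + replication + " exceeds maximum " + maxReplication + client);
        }
        if (replication < minReplication) {
          throw new IOException("file " + src + ": requested replication "
              + replication + " is below the required minimum "
              + minReplication + client);
        }
      }
    }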
*/ - void processOverReplicatedBlock(Block block, short replication, + public void processOverReplicatedBlock(Block block, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { assert namesystem.hasWriteLock(); if (addedNode == delNodeHint) { @@ -1597,7 +1630,7 @@ addedNode, delNodeHint, replicator); } - void addToExcessReplicate(DatanodeInfo dn, Block block) { + public void addToExcessReplicate(DatanodeInfo dn, Block block) { assert namesystem.hasWriteLock(); Collection<Block> excessBlocks = excessReplicateMap.get(dn.getStorageID()); if (excessBlocks == null) { @@ -1618,7 +1651,7 @@ * Modify (block-->datanode) map. Possibly generate replication tasks, if the * removed block is still valid. */ - void removeStoredBlock(Block block, DatanodeDescriptor node) { + public void removeStoredBlock(Block block, DatanodeDescriptor node) { if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block + " from " + node.getName()); @@ -1673,7 +1706,7 @@ /** * The given node is reporting that it received a certain block. */ - void addBlock(DatanodeDescriptor node, Block block, String delHint) + public void addBlock(DatanodeDescriptor node, Block block, String delHint) throws IOException { // decrement number of blocks scheduled to this datanode. node.decBlocksScheduled(); @@ -1726,7 +1759,7 @@ /** * Return the number of nodes that are live and decommissioned. */ - NumberReplicas countNodes(Block b) { + public NumberReplicas countNodes(Block b) { int count = 0; int live = 0; int corrupt = 0; @@ -1805,7 +1838,7 @@ * Return true if there are any blocks on this node that have not * yet reached their replication factor. Otherwise returns false. */ - boolean isReplicationInProgress(DatanodeDescriptor srcNode) { + public boolean isReplicationInProgress(DatanodeDescriptor srcNode) { boolean status = false; int underReplicatedBlocks = 0; int decommissionOnlyReplicas = 0; @@ -1855,11 +1888,11 @@ return status; } - int getActiveBlockCount() { + public int getActiveBlockCount() { return blocksMap.size() - (int)pendingDeletionBlocksCount; } - DatanodeDescriptor[] getNodes(BlockInfo block) { + public DatanodeDescriptor[] getNodes(BlockInfo block) { DatanodeDescriptor[] nodes = new DatanodeDescriptor[block.numNodes()]; Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); @@ -1869,22 +1902,22 @@ return nodes; } - int getTotalBlocks() { + public int getTotalBlocks() { return blocksMap.size(); } - void removeBlock(Block block) { + public void removeBlock(Block block) { addToInvalidates(block); corruptReplicas.removeFromCorruptReplicasMap(block); blocksMap.removeBlock(block); } - BlockInfo getStoredBlock(Block block) { + public BlockInfo getStoredBlock(Block block) { return blocksMap.getStoredBlock(block); } /* updates a block in the under-replication queue */ - void updateNeededReplications(Block block, int curReplicasDelta, + public void updateNeededReplications(Block block, int curReplicasDelta, int expectedReplicasDelta) { namesystem.writeLock(); try { @@ -1905,13 +1938,13 @@ } } - void checkReplication(Block block, int numExpectedReplicas) { + public void checkReplication(Block block, int numExpectedReplicas) { // filter out containingNodes that are marked for decommission.
NumberReplicas number = countNodes(block); if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) { neededReplications.add(block, number.liveReplicas(), - number.decommissionedReplicas, + number.decommissionedReplicas(), numExpectedReplicas); } } @@ -1926,11 +1959,8 @@ return fileINode.getReplication(); } - /** - * Remove a datanode from the invalidatesSet - * @param n datanode - */ - void removeFromInvalidates(String storageID) { + /** Remove a datanode from the invalidatesSet */ + public void removeFromInvalidates(String storageID) { Collection<Block> blocks = recentInvalidateSets.remove(storageID); if (blocks != null) { pendingDeletionBlocksCount -= blocks.size(); @@ -1998,7 +2028,7 @@ //Returns the number of racks over which a given block is replicated //decommissioning/decommissioned nodes are not counted. corrupt replicas //are also ignored - int getNumberOfRacks(Block b) { + public int getNumberOfRacks(Block b) { HashSet<String> rackSet = new HashSet<String>(0); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(b); @@ -2056,32 +2086,32 @@ } } - long getMissingBlocksCount() { + public long getMissingBlocksCount() { // not locking return this.neededReplications.getCorruptBlockSize(); } - BlockInfo addINode(BlockInfo block, INodeFile iNode) { + public BlockInfo addINode(BlockInfo block, INodeFile iNode) { return blocksMap.addINode(block, iNode); } - INodeFile getINode(Block b) { + public INodeFile getINode(Block b) { return blocksMap.getINode(b); } - void removeFromCorruptReplicasMap(Block block) { + public void removeFromCorruptReplicasMap(Block block) { corruptReplicas.removeFromCorruptReplicasMap(block); } - int numCorruptReplicas(Block block) { + public int numCorruptReplicas(Block block) { return corruptReplicas.numCorruptReplicas(block); } - void removeBlockFromMap(Block block) { + public void removeBlockFromMap(Block block) { blocksMap.removeBlock(block); } - int getCapacity() { + public int getCapacity() { namesystem.readLock(); try { return blocksMap.getCapacity(); @@ -2104,7 +2134,7 @@ * @return Up to numExpectedBlocks blocks from startingBlockId if it exists * */ - long[] getCorruptReplicaBlockIds(int numExpectedBlocks, + public long[] getCorruptReplicaBlockIds(int numExpectedBlocks, Long startingBlockId) { return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks, startingBlockId); @@ -2113,7 +2143,7 @@ /** * Return an iterator over the set of blocks for which there are no replicas. */ - UnderReplicatedBlocks.BlockIterator getCorruptReplicaBlockIterator() { + public BlockIterator getCorruptReplicaBlockIterator() { return neededReplications .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS); } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java similarity index 96% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index aa0be3a8104..fe2eb3e1ede 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License.
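The accessor hunks above, where number.decommissionedReplicas() replaces the old field read num.decommissionedReplicas, are the caller-side half of the NumberReplicas extraction that appears later in this patch. countNodes builds such an object by walking a block's replicas and incrementing one counter per category; a standalone sketch of that tallying, with an enum standing in for replica state:

    import java.util.Arrays;
    import java.util.List;

    public class CountNodesSketch {
      enum ReplicaState { LIVE, CORRUPT, DECOMMISSIONED, EXCESS }

      // Mirrors the shape of NumberReplicas: one counter per replica category.
      static int[] countNodes(List<ReplicaState> replicas) {
        int live = 0, corrupt = 0, decommissioned = 0, excess = 0;
        for (ReplicaState s : replicas) {
          switch (s) {
            case CORRUPT:        corrupt++;        break;
            case DECOMMISSIONED: decommissioned++; break;
            case EXCESS:         excess++;         break;
            default:             live++;
          }
        }
        return new int[] { live, decommissioned, corrupt, excess };
      }

      public static void main(String[] args) {
        int[] n = countNodes(Arrays.asList(
            ReplicaState.LIVE, ReplicaState.LIVE, ReplicaState.DECOMMISSIONED));
        System.out.println(Arrays.toString(n)); // [2, 1, 0, 0]
      }
    }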
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Collection; @@ -26,6 +26,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; +import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.ReflectionUtils; @@ -94,7 +96,7 @@ public abstract class BlockPlacementPolicy { * @return array of DatanodeDescriptor instances chosen as target * and sorted as a pipeline. */ - abstract DatanodeDescriptor[] chooseTarget(String srcPath, + public abstract DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes, @@ -222,11 +224,11 @@ public abstract class BlockPlacementPolicy { * @param numOfReplicas number of replicas wanted. * @param writer the writer's machine, null if not in the cluster. * @param blocksize size of the data to be written. - * @param excludedNodes: datanodes that should not be considered as targets. + * @param excludedNodes datanodes that should not be considered as targets. * @return array of DatanodeDescriptor instances chosen as targets * and sorted as a pipeline. */ - DatanodeDescriptor[] chooseTarget(String srcPath, + public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, DatanodeDescriptor writer, HashMap<Node, Node> excludedNodes, diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java similarity index 98% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 0d3fbb7a833..bac1e42e69b 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License.
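BlockPlacementPolicy keeps its ReflectionUtils import above because a getInstance factory, outside the diff context, reflectively constructs whichever policy class the configuration names. A hedged usage sketch; the getInstance signature and the long blocksize tail of chooseTarget are assumptions based on contemporaneous Hadoop code, since the diff elides both:

    import java.util.ArrayList;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
    import org.apache.hadoop.net.NetworkTopology;

    public class PlacementSketch {
      /** Choose 3 targets for one new block; writer may be null for an
       *  off-cluster client. Signatures past the diff context are assumed. */
      static DatanodeDescriptor[] placeOneBlock(Configuration conf,
          FSClusterStats stats, NetworkTopology topology,
          DatanodeDescriptor writer, long blocksize) {
        BlockPlacementPolicy policy =
            BlockPlacementPolicy.getInstance(conf, stats, topology);
        return policy.chooseTarget("/user/alice/data", 3, writer,
            new ArrayList<DatanodeDescriptor>(), blocksize);
      }
    }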
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Collection; @@ -32,6 +32,9 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; +import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; @@ -89,7 +92,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { /** {@inheritDoc} */ @Override - DatanodeDescriptor[] chooseTarget(String srcPath, + public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes, diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java similarity index 95% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java index 634e60071a5..7a2854c816f 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Iterator; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.util.GSet; import org.apache.hadoop.hdfs.util.LightWeightGSet; @@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet; * block's metadata currently includes INode it belongs to and * the datanodes that store the block. */ -class BlocksMap { +public class BlocksMap { private static class NodeIterator implements Iterator<DatanodeDescriptor> { private BlockInfo blockInfo; private int nextIdx = 0; @@ -100,7 +101,7 @@ class BlocksMap { /** * Add block b belonging to the specified file inode to the map. */ - BlockInfo addINode(BlockInfo b, INodeFile iNode) { + public BlockInfo addINode(BlockInfo b, INodeFile iNode) { BlockInfo info = blocks.get(b); if (info != b) { info = b; @@ -136,7 +137,7 @@ class BlocksMap { * Searches for the block in the BlocksMap and * returns Iterator that iterates through the nodes the block belongs to.
*/ - Iterator<DatanodeDescriptor> nodeIterator(Block b) { + public Iterator<DatanodeDescriptor> nodeIterator(Block b) { return nodeIterator(blocks.get(b)); } @@ -185,7 +186,7 @@ /** * Check if the block exists in map */ - boolean contains(Block block) { + public boolean contains(Block block) { return blocks.contains(block); } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java similarity index 98% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index a66ec7c4006..ebef0022172 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -15,10 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.Server; import java.util.*; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java similarity index 89% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index e7e09eeab8c..db0132fdfb7 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.DataInput; import java.io.IOException; @@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.io.Text; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.io.WritableUtils; @@ -44,7 +45,7 @@ public class DatanodeDescriptor extends DatanodeInfo { // Stores status of decommissioning. // If node is not decommissioning, do not use this object for anything.
- DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); + public DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); /** Block and targets pair */ @InterfaceAudience.Private @@ -96,8 +97,8 @@ public class DatanodeDescriptor extends DatanodeInfo { private int numBlocks = 0; // isAlive == heartbeats.contains(this) // This is an optimization, because contains takes O(n) time on Arraylist - protected boolean isAlive = false; - protected boolean needKeyUpdate = false; + public boolean isAlive = false; + public boolean needKeyUpdate = false; /** A queue of blocks to be replicated by this datanode */ private BlockQueue replicateBlocks = new BlockQueue(); @@ -204,7 +205,7 @@ public class DatanodeDescriptor extends DatanodeInfo { * Add datanode to the block. * Add block to the head of the list of blocks belonging to the data-node. */ - boolean addBlock(BlockInfo b) { + public boolean addBlock(BlockInfo b) { if(!b.addNode(this)) return false; // add to the head of the data-node list @@ -217,7 +218,7 @@ public class DatanodeDescriptor extends DatanodeInfo { * Remove block from the list of blocks belonging to the data-node. * Remove datanode from the block. */ - boolean removeBlock(BlockInfo b) { + public boolean removeBlock(BlockInfo b) { blockList = b.listRemove(blockList, this); if ( b.removeNode(this) ) { numBlocks--; @@ -242,7 +243,7 @@ public class DatanodeDescriptor extends DatanodeInfo { * @param newBlock - a replacement block * @return the new block */ - BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) { + public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) { boolean done = removeBlock(oldBlock); assert done : "Old block should belong to the data-node when replacing"; done = addBlock(newBlock); @@ -250,7 +251,7 @@ public class DatanodeDescriptor extends DatanodeInfo { return newBlock; } - void resetBlocks() { + public void resetBlocks() { this.capacity = 0; this.remaining = 0; this.blockPoolUsed = 0; @@ -268,7 +269,7 @@ public class DatanodeDescriptor extends DatanodeInfo { /** * Updates stats from datanode heartbeat. */ - void updateHeartbeat(long capacity, long dfsUsed, long remaining, + public void updateHeartbeat(long capacity, long dfsUsed, long remaining, long blockPoolUsed, int xceiverCount, int volFailures) { this.capacity = capacity; this.dfsUsed = dfsUsed; @@ -283,7 +284,7 @@ public class DatanodeDescriptor extends DatanodeInfo { /** * Iterates over the list of blocks belonging to the datanode. 
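The per-datanode work queues above (replicateBlocks, recoverBlocks, invalidateBlocks) all hand work out in bounded batches, e.g. getReplicationCommand(maxTransfers) and getInvalidateBlocks(maxblocks). The bounded-poll idiom in isolation, as a standalone sketch:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Queue;

    public class BoundedPollSketch {
      /** Drain at most max items, mirroring the BlockQueue.poll style. */
      static <T> List<T> poll(Queue<T> queue, int max) {
        if (queue.isEmpty()) return null;  // null on empty, like the queues above
        List<T> batch = new ArrayList<T>(Math.min(max, queue.size()));
        for (int i = 0; i < max && !queue.isEmpty(); i++) {
          batch.add(queue.remove());
        }
        return batch;
      }

      public static void main(String[] args) {
        Queue<String> q =
            new ArrayDeque<String>(Arrays.asList("blk_1", "blk_2", "blk_3"));
        System.out.println(poll(q, 2));    // [blk_1, blk_2]
        System.out.println(poll(q, 2));    // [blk_3]
      }
    }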
*/ - static class BlockIterator implements Iterator { + public static class BlockIterator implements Iterator { private BlockInfo current; private DatanodeDescriptor node; @@ -307,7 +308,7 @@ public class DatanodeDescriptor extends DatanodeInfo { } } - Iterator getBlockIterator() { + public Iterator getBlockIterator() { return new BlockIterator(this.blockList, this); } @@ -361,11 +362,11 @@ public class DatanodeDescriptor extends DatanodeInfo { } } - List getReplicationCommand(int maxTransfers) { + public List getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } - BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { + public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { List blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) return null; @@ -375,7 +376,7 @@ public class DatanodeDescriptor extends DatanodeInfo { /** * Remove the specified number of blocks to be invalidated */ - Block[] getInvalidateBlocks(int maxblocks) { + public Block[] getInvalidateBlocks(int maxblocks) { return getBlockArray(invalidateBlocks, maxblocks); } @@ -418,7 +419,7 @@ public class DatanodeDescriptor extends DatanodeInfo { } /** Serialization for FSEditLog */ - void readFieldsFromFSEditLog(DataInput in) throws IOException { + public void readFieldsFromFSEditLog(DataInput in) throws IOException { this.name = DeprecatedUTF8.readString(in); this.storageID = DeprecatedUTF8.readString(in); this.infoPort = in.readShort() & 0x0000ffff; @@ -445,7 +446,7 @@ public class DatanodeDescriptor extends DatanodeInfo { /** * Increments counter for number of blocks scheduled. */ - void incBlocksScheduled() { + public void incBlocksScheduled() { currApproxBlocksScheduled++; } @@ -485,12 +486,13 @@ public class DatanodeDescriptor extends DatanodeInfo { // by DatanodeID return (this == obj) || super.equals(obj); } - - class DecommissioningStatus { - int underReplicatedBlocks; - int decommissionOnlyReplicas; - int underReplicatedInOpenFiles; - long startTime; + + /** Decommissioning status */ + public class DecommissioningStatus { + private int underReplicatedBlocks; + private int decommissionOnlyReplicas; + private int underReplicatedInOpenFiles; + private long startTime; synchronized void set(int underRep, int onlyRep, int underConstruction) { @@ -501,32 +503,34 @@ public class DatanodeDescriptor extends DatanodeInfo { decommissionOnlyReplicas = onlyRep; underReplicatedInOpenFiles = underConstruction; } - - synchronized int getUnderReplicatedBlocks() { + + /** @return the number of under-replicated blocks */ + public synchronized int getUnderReplicatedBlocks() { if (isDecommissionInProgress() == false) { return 0; } return underReplicatedBlocks; } - synchronized int getDecommissionOnlyReplicas() { + /** @return the number of decommission-only replicas */ + public synchronized int getDecommissionOnlyReplicas() { if (isDecommissionInProgress() == false) { return 0; } return decommissionOnlyReplicas; } - - synchronized int getUnderReplicatedInOpenFiles() { + /** @return the number of under-replicated blocks in open files */ + public synchronized int getUnderReplicatedInOpenFiles() { if (isDecommissionInProgress() == false) { return 0; } return underReplicatedInOpenFiles; } - - synchronized void setStartTime(long time) { + /** Set start time */ + public synchronized void setStartTime(long time) { startTime = time; } - - synchronized long getStartTime() { + /** @return start time */ + public synchronized long getStartTime() { if 
(isDecommissionInProgress() == false) { return 0; } @@ -538,11 +542,11 @@ public class DatanodeDescriptor extends DatanodeInfo { * Set the flag to indicate if this datanode is disallowed from communicating * with the namenode. */ - void setDisallowed(boolean flag) { + public void setDisallowed(boolean flag) { disallowed = flag; } - - boolean isDisallowed() { + /** Is the datanode disallowed from communicating with the namenode? */ + public boolean isDisallowed() { return disallowed; } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java new file mode 100644 index 00000000000..52f62587b1c --- /dev/null +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +/** + * An immutable object that stores the number of live replicas and + * the number of decommissioned replicas. + */ +public class NumberReplicas { + private int liveReplicas; + private int decommissionedReplicas; + private int corruptReplicas; + private int excessReplicas; + + NumberReplicas() { + initialize(0, 0, 0, 0); + } + + NumberReplicas(int live, int decommissioned, int corrupt, int excess) { + initialize(live, decommissioned, corrupt, excess); + } + + void initialize(int live, int decommissioned, int corrupt, int excess) { + liveReplicas = live; + decommissionedReplicas = decommissioned; + corruptReplicas = corrupt; + excessReplicas = excess; + } + + public int liveReplicas() { + return liveReplicas; + } + public int decommissionedReplicas() { + return decommissionedReplicas; + } + public int corruptReplicas() { + return corruptReplicas; + } + public int excessReplicas() { + return excessReplicas; + } +} diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java similarity index 98% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java index 96b1f6411fd..01c2fe138f1 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java @@ -15,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License.
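NumberReplicas above is the new top-level home of what was previously an inner class of FSNamesystem. Its constructors stay package-private, so only block-management code can create one, while the accessors are public for callers such as the neededReplications bookkeeping earlier in the patch. A usage sketch from inside the same package; it assumes the patched tree on the classpath, and the counts are illustrative:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    public class NumberReplicasSketch {
      public static void main(String[] args) {
        // 2 live, 1 decommissioned, 0 corrupt, 1 excess.
        NumberReplicas num = new NumberReplicas(2, 1, 0, 1);
        System.out.println(num.liveReplicas());           // 2
        System.out.println(num.decommissionedReplicas()); // 1
        System.out.println(num.corruptReplicas());        // 0
        System.out.println(num.excessReplicas());         // 1
      }
    }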
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; + import static org.apache.hadoop.hdfs.server.common.Util.now; import org.apache.hadoop.util.*; import java.io.*; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java similarity index 96% rename from hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java rename to hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java index 196bc0449c5..9c2a213985c 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java @@ -15,17 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.*; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.NameNode; /* Class for keeping track of under-replicated blocks. * Blocks have replication priority, with priority 0 indicating the highest; * blocks that have only one replica have the highest priority. */ -class UnderReplicatedBlocks implements Iterable<Block> { +public class UnderReplicatedBlocks implements Iterable<Block> { static final int LEVEL = 5; static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4; private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>(); @@ -47,7 +48,7 @@ class UnderReplicatedBlocks implements Iterable<Block> { } /* Return the total number of under-replicated blocks */ - synchronized int size() { + public synchronized int size() { int size = 0; for (int i=0; i<LEVEL; i++) { size += priorityQueues.get(i).size(); } return size; } /* Check if a block is in the neededReplication queue */ - synchronized boolean contains(Block block) { + public synchronized boolean contains(Block block) { for(TreeSet<Block> set:priorityQueues) { if(set.contains(block)) { return true; } } @@ -218,7 +219,7 @@ class UnderReplicatedBlocks implements Iterable<Block> { return new BlockIterator(); } - class BlockIterator implements Iterator<Block> { + public class BlockIterator implements Iterator<Block> { private int level; private boolean isIteratorForLevel = false; private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>(); diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 704b4d76410..ccdaa0e24d4 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -26,8 +26,8 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.net.URL; import java.net.URLEncoder; -import java.util.Arrays; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -45,14 +45,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.io.Text; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java index 977abb9a220..0c7c4ca1ff6 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java @@ -21,6 +21,7 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.util.CyclicIteration; /** diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 08b10cb9c62..1d82e2bdcd5 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.common.Util.now; + import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; @@ -24,35 +26,40 @@ import java.net.URI; import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.locks.Condition; import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.ParentNotDirectoryException; -import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.FSLimitException; -import org.apache.hadoop.hdfs.protocol.FSLimitException.*; +import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; +import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.util.ByteArray; -import static org.apache.hadoop.hdfs.server.common.Util.now; -import org.apache.hadoop.hdfs.DFSConfigKeys; /************************************************* * FSDirectory stores the filesystem directory state. @@ -63,7 +70,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; * and logged to disk. * *************************************************/ -class FSDirectory implements Closeable { +public class FSDirectory implements Closeable { INodeDirectoryWithQuota rootDir; FSImage fsImage; @@ -1332,7 +1339,7 @@ class FSDirectory implements Closeable { * @throws QuotaExceededException if the new count violates any quota limit * @throws FileNotFound if path does not exist. */ - void updateSpaceConsumed(String path, long nsDelta, long dsDelta) + public void updateSpaceConsumed(String path, long nsDelta, long dsDelta) throws QuotaExceededException, FileNotFoundException, UnresolvedLinkException { diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 8d1ad102573..d8ecaf4e2ec 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.common.Util.now; + import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.EOFException; @@ -32,10 +34,30 @@ import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import static org.apache.hadoop.hdfs.server.common.Util.now; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; +import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*; public class FSEditLogLoader { private final FSNamesystem fsNamesys; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 9583b152abf..591eb9798fb 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; + import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.io.BytesWritable; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index ff487df926a..46d08970b32 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index d993ae6aa76..277fac0eb9c 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -22,7 +22,6 @@ import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; -import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -33,6 +32,9 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; diff --git 
a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d1d72864067..f1e887f4775 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -97,15 +97,20 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.Util; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; -import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; @@ -235,7 +240,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, // Stores the correct file name hierarchy // public FSDirectory dir; - BlockManager blockManager; + public BlockManager blockManager; // Block pool ID used by this namenode String blockPoolId; @@ -270,10 +275,10 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, * Stores a set of DatanodeDescriptor objects. * This is a subset of {@link #datanodeMap}, containing nodes that are * considered alive. - * The {@link HeartbeatMonitor} periodically checks for outdated entries, + * The HeartbeatMonitor periodically checks for outdated entries, * and removes them from the list. */ - ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>(); + public ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>(); public LeaseManager leaseManager = new LeaseManager(this); @@ -314,8 +319,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, private volatile SafeModeInfo safeMode; // safe mode information private Host2NodesMap host2DataNodeMap = new Host2NodesMap(); - // datanode networktoplogy - NetworkTopology clusterMap = new NetworkTopology(); + /** datanode network topology */ + public NetworkTopology clusterMap = new NetworkTopology(); private DNSToSwitchMapping dnsToSwitchMapping; private HostsFileReader hostsReader; @@ -329,7 +334,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, private final GenerationStamp generationStamp = new GenerationStamp(); // Ask Datanode only up to this many blocks to delete. - int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT; + public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT; // precision of access times. private long accessTimePrecision = 0;
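The hunk that follows makes FSNamesystem's read/write-lock helpers public, which is what lets code in the new o.a.h.h.s.blockmanagement package synchronize with the namesystem. A minimal sketch of the usage pattern this enables, assuming only the members visible in this patch (the enclosing helper class is hypothetical, not part of the change):

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Hypothetical blockmanagement-side caller; readLock()/readUnlock() and the
// now-public heartbeats list are the only members taken from this patch.
class HeartbeatSnapshot {
  static int liveDatanodeCount(FSNamesystem ns) {
    ns.readLock();                 // acquire the namesystem read lock
    try {
      return ns.heartbeats.size(); // safe to read while the lock is held
    } finally {
      ns.readUnlock();             // always release, even on exception
    }
  }
}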
@@ -472,23 +477,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, } // utility methods to acquire and release read lock and write lock - void readLock() { + public void readLock() { this.fsLock.readLock().lock(); } - void readUnlock() { + public void readUnlock() { this.fsLock.readLock().unlock(); } - void writeLock() { + public void writeLock() { this.fsLock.writeLock().lock(); } - void writeUnlock() { + public void writeUnlock() { this.fsLock.writeLock().unlock(); } - boolean hasWriteLock() { + public boolean hasWriteLock() { return this.fsLock.isWriteLockedByCurrentThread(); } @@ -1014,7 +1019,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, } /** Create a LocatedBlock. */ - LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations, + public LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations, final long offset, final boolean corrupt) throws IOException { return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt); } @@ -3013,7 +3018,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, * @return an array of datanode commands * @throws IOException */ - DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, + public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, long blockPoolUsed, int xceiverCount, int xmitsInProgress, int failedVolumes) throws IOException { @@ -3521,7 +3526,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, * If no such a node is available, * then pick a node with least free space */ - void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, + public void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, Block b, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint, @@ -3979,45 +3984,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, return replication; } - /** - * A immutable object that stores the number of live replicas and - * the number of decommissined Replicas. - */ - static class NumberReplicas { - private int liveReplicas; - int decommissionedReplicas; - private int corruptReplicas; - private int excessReplicas; - - NumberReplicas() { - initialize(0, 0, 0, 0); - } - - NumberReplicas(int live, int decommissioned, int corrupt, int excess) { - initialize(live, decommissioned, corrupt, excess); - } - - void initialize(int live, int decommissioned, int corrupt, int excess) { - liveReplicas = live; - decommissionedReplicas = decommissioned; - corruptReplicas = corrupt; - excessReplicas = excess; - } - - int liveReplicas() { - return liveReplicas; - } - int decommissionedReplicas() { - return decommissionedReplicas; - } - int corruptReplicas() { - return corruptReplicas; - } - int excessReplicas() { - return excessReplicas; - } - } - /** * Change, if appropriate, the admin state of a datanode to * decommission completed. Return true if decommission is complete. @@ -4675,7 +4641,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, * Check whether the name node is in safe mode.
* @return true if safe mode is ON, false otherwise */ - boolean isInSafeMode() { + public boolean isInSafeMode() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) @@ -4686,7 +4652,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, /** * Check whether the name node is in startup mode. */ - boolean isInStartupSafeMode() { + public boolean isInStartupSafeMode() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) @@ -4697,7 +4663,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, /** * Check whether replication queues are populated. */ - boolean isPopulatingReplQueues() { + public boolean isPopulatingReplQueues() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) @@ -4709,7 +4675,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, * Increment number of blocks that reached minimal replication. * @param replication current replication */ - void incrementSafeBlockCount(int replication) { + public void incrementSafeBlockCount(int replication) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) @@ -4720,7 +4686,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, /** * Decrement number of blocks that reached minimal replication. */ - void decrementSafeBlockCount(Block b) { + public void decrementSafeBlockCount(Block b) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) // mostly true @@ -5042,13 +5008,13 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, @Override // FSNamesystemMBean @Metric public long getPendingReplicationBlocks() { - return blockManager.pendingReplicationBlocksCount; + return blockManager.getPendingReplicationBlocksCount(); } @Override // FSNamesystemMBean @Metric public long getUnderReplicatedBlocks() { - return blockManager.underReplicatedBlocksCount; + return blockManager.getUnderReplicatedBlocksCount(); } /** Return number of under-replicated but not missing blocks */ @@ -5059,23 +5025,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, /** Returns number of blocks with corrupt replicas */ @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"}) public long getCorruptReplicaBlocks() { - return blockManager.corruptReplicaBlocksCount; + return blockManager.getCorruptReplicaBlocksCount(); } @Override // FSNamesystemMBean @Metric public long getScheduledReplicationBlocks() { - return blockManager.scheduledReplicationBlocksCount; + return blockManager.getScheduledReplicationBlocksCount(); } @Metric public long getPendingDeletionBlocks() { - return blockManager.pendingDeletionBlocksCount; + return blockManager.getPendingDeletionBlocksCount(); } @Metric public long getExcessBlocks() { - return blockManager.excessBlocksCount; + return blockManager.getExcessBlocksCount(); } @Metric @@ -5444,7 +5410,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, } /** Get a datanode descriptor given corresponding storageID */ - DatanodeDescriptor getDatanode(String nodeID) { + public DatanodeDescriptor getDatanode(String nodeID) { assert hasReadOrWriteLock(); return datanodeMap.get(nodeID); } @@ -5508,7 +5474,7 @@ public class FSNamesystem implements FSConstants, 
FSNamesystemMBean, if (startBlockAfter != null) { startBlockId = Block.filename2id(startBlockAfter); } - BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator(); + UnderReplicatedBlocks.BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator(); while (blkIterator.hasNext()) { Block blk = blkIterator.next(); INode inode = blockManager.getINode(blk); diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java index a00022dc0de..e24cbcb6d78 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java @@ -17,10 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.util.*; +import java.util.HashMap; +import java.util.Random; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; + class Host2NodesMap { private HashMap<String, DatanodeDescriptor[]> map = new HashMap<String, DatanodeDescriptor[]>(); diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 2faa430d1a0..c9cea600257 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.Arrays; import java.util.List; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.fs.permission.*; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.util.StringUtils; /** @@ -32,7 +34,7 @@ import org.apache.hadoop.util.StringUtils; * This is a base INode class containing common fields for file and * directory inodes. */ -abstract class INode implements Comparable<byte[]>, FSInodeInfo { +public abstract class INode implements Comparable<byte[]>, FSInodeInfo { /* * The inode name is in java UTF8 encoding; * The name in HdfsFileStatus should keep the same encoding as this. @@ -324,7 +326,7 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo { /** * Is this inode being constructed? */ - boolean isUnderConstruction() { + public boolean isUnderConstruction() { return false; } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index d880b3e8bc9..69ff700373e 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -24,8 +24,11 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -class INodeFile extends INode { +/** I-node for closed file.
*/ +public class INodeFile extends INode { static final FsPermission UMASK = FsPermission.createImmutable((short)0111); //Number of bits for Block size @@ -106,7 +109,7 @@ class INodeFile extends INode { * Get file blocks * @return file blocks */ - BlockInfo[] getBlocks() { + public BlockInfo[] getBlocks() { return this.blocks; } @@ -149,7 +152,7 @@ class INodeFile extends INode { /** * Set file block */ - void setBlock(int idx, BlockInfo blk) { + public void setBlock(int idx, BlockInfo blk) { this.blocks[idx] = blk; } @@ -237,7 +240,7 @@ class INodeFile extends INode { * Get the last block of the file. * Make sure it has the right type. */ - <T extends BlockInfo> T getLastBlock() throws IOException { + public <T extends BlockInfo> T getLastBlock() throws IOException { if (blocks == null || blocks.length == 0) return null; T returnBlock = null; @@ -252,7 +255,8 @@ class INodeFile extends INode { return returnBlock; } - int numBlocks() { + /** @return the number of blocks */ + public int numBlocks() { return blocks == null ? 0 : blocks.length; } } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java index 409827c8b30..7663ecff762 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java @@ -21,10 +21,15 @@ import java.io.IOException; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; - -class INodeFileUnderConstruction extends INodeFile { +/** + * I-node for file being written. + */ +public class INodeFileUnderConstruction extends INodeFile { private String clientName; // lease holder private final String clientMachine; private final DatanodeDescriptor clientNode; // if client is a cluster node too. @@ -43,7 +48,7 @@ class INodeFileUnderConstruction extends INodeFile { this.clientNode = clientNode; } - public INodeFileUnderConstruction(byte[] name, + INodeFileUnderConstruction(byte[] name, short blockReplication, long modificationTime, long preferredBlockSize, @@ -80,7 +85,7 @@ class INodeFileUnderConstruction extends INodeFile { * Is this inode being constructed? */ @Override - boolean isUnderConstruction() { + public boolean isUnderConstruction() { return true; } @@ -122,7 +127,7 @@ class INodeFileUnderConstruction extends INodeFile { * Convert the last block of the file to an under-construction block. * Set its locations.
*/ - BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, + public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, DatanodeDescriptor[] targets) throws IOException { if (blocks == null || blocks.length == 0) { diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index c38ba5b652e..257d37e0cb9 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -163,7 +163,7 @@ public class LeaseManager { /** * Finds the pathname for the specified pendingFile */ - synchronized String findPath(INodeFileUnderConstruction pendingFile) + public synchronized String findPath(INodeFileUnderConstruction pendingFile) throws IOException { Lease lease = getLease(pendingFile.getClientName()); if (lease != null) { diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 5c60036d024..f1a403b7e18 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -247,7 +247,7 @@ public class NameNode implements NamenodeProtocols, FSConstants { /** Return the {@link FSNamesystem} object. * @return {@link FSNamesystem} object. */ - FSNamesystem getNamesystem() { + public FSNamesystem getNamesystem() { return namesystem; } diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index a3b42d4c646..6900644e328 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -36,20 +36,20 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NodeBase; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; /** diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 494a2de140f..1181097a1f9 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -36,24 +36,24 @@ import javax.servlet.http.HttpServletResponse; import javax.servlet.jsp.JspWriter; import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; - -import org.znerd.xmlenc.*; +import org.znerd.xmlenc.XMLOutputter; class NamenodeJspHelper { static String getSafeModeText(FSNamesystem fsn) { diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java index ae8625bf995..b4e4ab7de14 100644 --- a/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java @@ -17,15 +17,20 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import java.io.*; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair; -import org.apache.hadoop.io.*; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; /**************************************************** diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java index 3a8d6e61db3..82bb4bf7c82 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java 
b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java index bcbe45c3d0e..e013a913750 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java @@ -25,7 +25,7 @@ import junit.framework.TestCase; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; /** * This class tests DatanodeDescriptor.getBlocksScheduled() at the diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java similarity index 93% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java index 17d65ec0e1e..ab63d7e085e 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java @@ -15,17 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + import junit.framework.TestCase; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; /** @@ -79,7 +82,6 @@ public class TestCorruptReplicaInfo extends TestCase { DatanodeDescriptor dn1 = new DatanodeDescriptor(); DatanodeDescriptor dn2 = new DatanodeDescriptor(); - DatanodeDescriptor dn3 = new DatanodeDescriptor(); crm.addToCorruptReplicasMap(getBlock(0), dn1); assertEquals("Number of corrupt blocks not returning correctly", diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java similarity index 97% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index bb3c2b1f797..ac3885738eb 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java similarity index 97% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index 9b10156a60a..aed48e421d3 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import java.util.ArrayList; @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java similarity index 98% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java index fa2828729ef..0d138e05d1a 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import junit.framework.TestCase; import java.lang.System; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java similarity index 99% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 17a4d3819a6..9ba21ead0d2 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -15,8 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import java.util.ArrayList; @@ -24,18 +23,19 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; +import junit.framework.TestCase; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetworkTopology; -import org.apache.hadoop.net.Node; -import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; - -import junit.framework.TestCase; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.FSConstants; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.Node; public class TestReplicationPolicy extends TestCase { private static final int BLOCK_SIZE = 1024; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java similarity index 95% rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java index b9763ae7a5f..bf1837397f5 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java @@ -15,7 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.server.namenode; +package org.apache.hadoop.hdfs.server.blockmanagement; + +import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -25,8 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; - -import junit.framework.TestCase; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; public class TestUnderReplicatedBlocks extends TestCase { public void testSetrepIncWithUnderReplicatedBlocks() throws Exception { diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index 14b58b942e3..094673b3237 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -17,30 +17,31 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + import java.io.File; import java.util.ArrayList; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import static org.apache.hadoop.test.MetricsAsserts.*; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.log4j.Level; - import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; -import static org.junit.Assume.assumeTrue; /** * Test reporting of DN volume failure counts and metrics. 
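A pattern worth noting in the FSNamesystem metric hunks earlier in this patch: direct reads of BlockManager counter fields (e.g. blockManager.pendingReplicationBlocksCount) become getter calls, since package-private fields are unreachable once the two classes live in different packages. A sketch of the accessor shape this implies on the BlockManager side; the getter names come from the call sites above, while the field declarations are assumptions, as BlockManager's own hunks are not shown in this excerpt:

// Sketch only, not the literal BlockManager hunk from this commit.
public class BlockManager {
  private volatile long pendingReplicationBlocksCount; // assumed field
  private volatile long underReplicatedBlocksCount;    // assumed field

  public long getPendingReplicationBlocksCount() {
    return pendingReplicationBlocksCount;
  }
  public long getUnderReplicatedBlocksCount() {
    return underReplicatedBlocksCount;
  }
}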
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java index 4db05ffe510..f18b1b54804 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java @@ -23,6 +23,7 @@ import java.io.IOException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.Storage; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index 883c51f3528..a56fd1b1723 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -19,10 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; -import org.apache.hadoop.ipc.Server; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.ipc.Server; /** * This is a utility class to expose NameNode functionality for unit tests. diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java index 765a1784fb5..b283b8a33fa 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java @@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.TestFileCreation; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java index 57e63e64510..95eebd30f60 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import junit.framework.TestCase; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import junit.framework.TestCase; - /** * Test if FSNamesystem handles heartbeat right */ diff --git 
a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index 0c5aa8cd276..0c8ed4cf8c6 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index 773bfde83db..78dfb738720 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -17,19 +17,15 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Iterator; import java.util.Random; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -40,8 +36,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; /** * This class tests the decommissioning of nodes. 
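The NumberReplicas inner class deleted from FSNamesystem earlier in this patch re-appears as the standalone public class org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas (imported by TestNodeCount below). A small usage sketch built from the four accessors visible in the deleted body; the helper itself is illustrative, and the accessors are assumed to become public along with the class:

import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;

// Illustrative helper: total replica count across all four categories
// tracked by the relocated NumberReplicas class.
class ReplicaTotals {
  static int total(NumberReplicas n) {
    return n.liveReplicas() + n.decommissionedReplicas()
        + n.corruptReplicas() + n.excessReplicas();
  }
}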
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java index 9a22fdaa8ac..f0717e10ed5 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java @@ -18,10 +18,11 @@ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.hdfs.protocol.DatanodeID; - import junit.framework.TestCase; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; + public class TestHost2NodesMap extends TestCase { static private Host2NodesMap map = new Host2NodesMap(); private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index efa989063d0..a3759888e32 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -21,17 +21,17 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; import java.util.ArrayList; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.DF; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import junit.framework.TestCase; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DF; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java index 036af78d234..16047f72a1a 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java @@ -21,6 +21,8 @@ import java.util.Collection; import java.util.Iterator; import java.util.concurrent.TimeoutException; +import junit.framework.TestCase; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -30,9 +32,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; - -import junit.framework.TestCase; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; /** * Test if live nodes count per node is correct diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java 
b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java index 538d213afd1..7f99bd7d2bf 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java @@ -20,20 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; import java.io.IOException; +import junit.framework.TestCase; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.hdfs.DFSConfigKeys; - -import junit.framework.TestCase; public class TestOverReplicatedBlocks extends TestCase { /** Test processOverReplicatedBlock can handle corrupt replicas fine. diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 7f4eb0f0071..0ad5a19c757 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -17,34 +17,35 @@ */ package org.apache.hadoop.hdfs.server.namenode.metrics; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; + import java.io.DataInputStream; import java.io.IOException; import java.util.Random; import junit.framework.TestCase; +import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.namenode.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.MetricsAsserts; import org.apache.log4j.Level; -import org.apache.commons.logging.LogFactory; - -import static org.apache.hadoop.test.MetricsAsserts.*; - /** * Test for metrics published by the Namenode */ diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java 
b/hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java index 0fead9e1080..9d755a7643a 100644 --- a/hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java +++ b/hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java @@ -24,8 +24,8 @@ import java.util.Map; import junit.framework.TestCase; -import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; public class TestNetworkTopology extends TestCase { private final static NetworkTopology cluster = new NetworkTopology(); diff --git a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java index 0c22de89779..bcf4d0235e6 100644 --- a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java +++ b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java @@ -19,6 +19,16 @@ package org.apache.hadoop.hdfs.server.namenode; import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -34,20 +44,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; - -import static org.junit.Assert.*; import org.junit.Before; import org.junit.Test; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.*; - -import java.io.File; -import java.io.IOException; public class TestNNLeaseRecovery { private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class); diff --git a/hdfs/src/webapps/hdfs/block_info_xml.jsp b/hdfs/src/webapps/hdfs/block_info_xml.jsp index 118e6b52e1c..b3cb978f033 100644 --- a/hdfs/src/webapps/hdfs/block_info_xml.jsp +++ b/hdfs/src/webapps/hdfs/block_info_xml.jsp @@ -59,19 +59,9 @@ %> <%@ page contentType="application/xml" - import="java.io.IOException" - import="java.util.Iterator" - import="org.apache.hadoop.conf.Configuration" - import="org.apache.hadoop.hdfs.protocol.Block" - import="org.apache.hadoop.hdfs.server.namenode.INode" - import="org.apache.hadoop.hdfs.server.namenode.BlocksMap" - import="org.apache.hadoop.hdfs.server.namenode.BlockInfo" - import="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor" import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo" import="org.apache.hadoop.hdfs.server.common.JspHelper" - import="org.apache.hadoop.util.ServletUtil" import="org.znerd.xmlenc.*" - %> <%! 
//for java.io.Serializable
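As a closing illustration of the new package boundary: the FSNamesystem.listCorruptFileBlocks hunk earlier in this patch drops the import of the formerly package-private BlockIterator and names the nested type fully qualified, now that UnderReplicatedBlocks and its iterator are public. A minimal sketch of that traversal from outside the package, assuming only members visible in this patch (the enclosing class is hypothetical):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks;

// Hypothetical external caller; getCorruptReplicaBlockIterator() and the
// public nested BlockIterator are used exactly as in the FSNamesystem hunk.
class CorruptReplicaWalk {
  static int countCorrupt(BlockManager blockManager) {
    UnderReplicatedBlocks.BlockIterator it =
        blockManager.getCorruptReplicaBlockIterator();
    int count = 0;
    while (it.hasNext()) {
      Block blk = it.next(); // each entry is a block with corrupt replicas
      count++;
    }
    return count;
  }
}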