HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new package o.a.h.h.s.blockmanagement.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140939 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2011-06-29 01:31:15 +00:00
Parent: 97b6ca4dd7
Commit: 09b6f98de4
53 changed files with 494 additions and 355 deletions

View File

@@ -534,6 +534,9 @@ Trunk (unreleased changes)
     HDFS-2110. StreamFile and ByteRangeInputStream cleanup. (eli)
 
+    HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new
+    package o.a.h.h.s.blockmanagement.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

View File

@@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;

View File

@@ -15,16 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 /**
  * Internal class for block metadata.
  */
-class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
+public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   private INodeFile inode;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
@@ -44,12 +45,12 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * Construct an entry for blocksmap
    * @param replication the block's replication factor
    */
-  protected BlockInfo(int replication) {
+  public BlockInfo(int replication) {
     this.triplets = new Object[3*replication];
     this.inode = null;
   }
 
-  protected BlockInfo(Block blk, int replication) {
+  public BlockInfo(Block blk, int replication) {
     super(blk);
     this.triplets = new Object[3*replication];
     this.inode = null;
@@ -65,11 +66,11 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
     this.inode = from.inode;
   }
 
-  INodeFile getINode() {
+  public INodeFile getINode() {
     return inode;
   }
 
-  void setINode(INodeFile inode) {
+  public void setINode(INodeFile inode) {
     this.inode = inode;
   }
@@ -162,7 +163,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   /**
    * Add data-node this block belongs to.
    */
-  boolean addNode(DatanodeDescriptor node) {
+  public boolean addNode(DatanodeDescriptor node) {
     if(findDatanode(node) >= 0) // the node is already there
       return false;
     // find the last null node
@@ -176,7 +177,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   /**
    * Remove data-node from the block.
    */
-  boolean removeNode(DatanodeDescriptor node) {
+  public boolean removeNode(DatanodeDescriptor node) {
     int dnIndex = findDatanode(node);
     if(dnIndex < 0) // the node is not found
       return false;
@@ -218,7 +219,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * If the head is null then form a new list.
    * @return current block as the new head of the list.
    */
-  BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
+  public BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
     int dnIndex = this.findDatanode(dn);
     assert dnIndex >= 0 : "Data node is not found: current";
     assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
@@ -238,7 +239,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * @return the new head of the list or null if the list becomes
    * empty after deletion.
    */
-  BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
+  public BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
     if(head == null)
       return null;
     int dnIndex = this.findDatanode(dn);
@@ -284,7 +285,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * to {@link BlockInfoUnderConstruction}.
    * @return {@link BlockUCState#COMPLETE}
    */
-  BlockUCState getBlockUCState() {
+  public BlockUCState getBlockUCState() {
     return BlockUCState.COMPLETE;
   }
@@ -293,7 +294,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    *
    * @return true if the state of the block is {@link BlockUCState#COMPLETE}
    */
-  boolean isComplete() {
+  public boolean isComplete() {
     return getBlockUCState().equals(BlockUCState.COMPLETE);
   }
@@ -302,7 +303,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    *
    * @return BlockInfoUnderConstruction - an under construction block.
    */
-  BlockInfoUnderConstruction convertToBlockUnderConstruction(
+  public BlockInfoUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
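BlockInfo's bookkeeping is compact but easy to misread: as the constructors above suggest, the triplets array holds three slots per expected replica, with slot 3*i naming the datanode and slots 3*i+1 / 3*i+2 linking into that datanode's block list. A minimal standalone sketch of the same idea, illustrative only (plain strings stand in for DatanodeDescriptor and the linked-list slots are left unused):

/** Illustrative sketch only: mimics BlockInfo's triplets bookkeeping, not HDFS code. */
class TripletsSketch {
  // slot 3*i   : "datanode" i holding this block
  // slot 3*i+1 : previous block in that datanode's list (unused in this sketch)
  // slot 3*i+2 : next block in that datanode's list (unused in this sketch)
  private final Object[] triplets;

  TripletsSketch(int replication) {
    this.triplets = new Object[3 * replication];
  }

  /** Linear scan over the datanode slots, as BlockInfo.findDatanode does. */
  int findDatanode(String node) {
    for (int i = 0; i * 3 < triplets.length; i++) {
      if (node.equals(triplets[i * 3])) {
        return i;
      }
    }
    return -1;
  }

  /** Put the node into the first free datanode slot; false if already present or full. */
  boolean addNode(String node) {
    if (findDatanode(node) >= 0) {
      return false;
    }
    for (int i = 0; i * 3 < triplets.length; i++) {
      if (triplets[i * 3] == null) {
        triplets[i * 3] = node;
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    TripletsSketch b = new TripletsSketch(3);
    System.out.println(b.addNode("dn1"));      // true
    System.out.println(b.addNode("dn1"));      // false, already recorded
    System.out.println(b.findDatanode("dn1")); // 0
  }
}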

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -24,12 +24,13 @@ import java.util.List;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-class BlockInfoUnderConstruction extends BlockInfo {
+public class BlockInfoUnderConstruction extends BlockInfo {
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
@@ -128,11 +129,14 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Create block and set its state to
    * {@link BlockUCState#UNDER_CONSTRUCTION}.
    */
-  BlockInfoUnderConstruction(Block blk, int replication) {
+  public BlockInfoUnderConstruction(Block blk, int replication) {
     this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
   }
 
-  BlockInfoUnderConstruction(Block blk, int replication,
+  /**
+   * Create a block that is currently being constructed.
+   */
+  public BlockInfoUnderConstruction(Block blk, int replication,
                              BlockUCState state,
                              DatanodeDescriptor[] targets) {
     super(blk, replication);
@@ -160,7 +164,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     return new BlockInfo(this);
   }
 
-  void setExpectedLocations(DatanodeDescriptor[] targets) {
+  /** Set expected locations */
+  public void setExpectedLocations(DatanodeDescriptor[] targets) {
     int numLocations = targets == null ? 0 : targets.length;
     this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
     for(int i = 0; i < numLocations; i++)
@@ -172,7 +177,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Create array of expected replica locations
    * (as has been assigned by chooseTargets()).
    */
-  DatanodeDescriptor[] getExpectedLocations() {
+  public DatanodeDescriptor[] getExpectedLocations() {
     int numLocations = replicas == null ? 0 : replicas.size();
     DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocations];
     for(int i = 0; i < numLocations; i++)
@@ -180,7 +185,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     return locations;
   }
 
-  int getNumExpectedLocations() {
+  /** Get the number of expected locations */
+  public int getNumExpectedLocations() {
     return replicas == null ? 0 : replicas.size();
   }
@@ -189,7 +195,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * @see BlockUCState
    */
   @Override // BlockInfo
-  BlockUCState getBlockUCState() {
+  public BlockUCState getBlockUCState() {
     return blockUCState;
   }
@@ -197,7 +203,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     blockUCState = s;
   }
 
-  long getBlockRecoveryId() {
+  /** Get block recovery ID */
+  public long getBlockRecoveryId() {
     return blockRecoveryId;
   }
@@ -220,7 +227,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Find the first alive data-node starting from the previous primary and
    * make it primary.
    */
-  void initializeBlockRecovery(long recoveryId) {
+  public void initializeBlockRecovery(long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
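With the constructors and accessors above made public, code elsewhere in the new blockmanagement package can drive a file's last block through its under-construction lifecycle directly. A hedged sketch of that flow using only members visible in these hunks; the newBlock, replication and targets values are placeholders, not taken from the patch:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;

class UnderConstructionSketch {
  /** Illustrative only: allocate a new last block in the UNDER_CONSTRUCTION state. */
  static BlockInfoUnderConstruction allocateLastBlock(
      Block newBlock, int replication, DatanodeDescriptor[] targets) {
    BlockInfoUnderConstruction uc = new BlockInfoUnderConstruction(
        newBlock, replication, BlockUCState.UNDER_CONSTRUCTION, targets);
    assert uc.getBlockUCState() == BlockUCState.UNDER_CONSTRUCTION;
    assert !uc.isComplete();
    uc.setExpectedLocations(targets);  // record the pipeline chosen for the write
    return uc;
  }
}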

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -39,10 +39,14 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
-import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
@@ -57,18 +61,43 @@ public class BlockManager {
   private final FSNamesystem namesystem;
 
-  volatile long pendingReplicationBlocksCount = 0L;
-  volatile long corruptReplicaBlocksCount = 0L;
-  volatile long underReplicatedBlocksCount = 0L;
-  volatile long scheduledReplicationBlocksCount = 0L;
-  volatile long excessBlocksCount = 0L;
-  volatile long pendingDeletionBlocksCount = 0L;
-  //
-  // Mapping: Block -> { INode, datanodes, self ref }
-  // Updated only in response to client-sent information.
-  //
-  final BlocksMap blocksMap;
+  private volatile long pendingReplicationBlocksCount = 0L;
+  private volatile long corruptReplicaBlocksCount = 0L;
+  private volatile long underReplicatedBlocksCount = 0L;
+  public volatile long scheduledReplicationBlocksCount = 0L;
+  private volatile long excessBlocksCount = 0L;
+  private volatile long pendingDeletionBlocksCount = 0L;
+
+  /** Used by metrics */
+  public long getPendingReplicationBlocksCount() {
+    return pendingReplicationBlocksCount;
+  }
+  /** Used by metrics */
+  public long getUnderReplicatedBlocksCount() {
+    return underReplicatedBlocksCount;
+  }
+  /** Used by metrics */
+  public long getCorruptReplicaBlocksCount() {
+    return corruptReplicaBlocksCount;
+  }
+  /** Used by metrics */
+  public long getScheduledReplicationBlocksCount() {
+    return scheduledReplicationBlocksCount;
+  }
+  /** Used by metrics */
+  public long getPendingDeletionBlocksCount() {
+    return pendingDeletionBlocksCount;
+  }
+  /** Used by metrics */
+  public long getExcessBlocksCount() {
+    return excessBlocksCount;
+  }
+
+  /**
+   * Mapping: Block -> { INode, datanodes, self ref }
+   * Updated only in response to client-sent information.
+   */
+  public final BlocksMap blocksMap;
 
   //
   // Store blocks-->datanodedescriptor(s) map of corrupt replicas
@@ -90,24 +119,24 @@ public class BlockManager {
   // eventually remove these extras.
   // Mapping: StorageID -> TreeSet<Block>
   //
-  Map<String, Collection<Block>> excessReplicateMap =
+  public final Map<String, Collection<Block>> excessReplicateMap =
     new TreeMap<String, Collection<Block>>();
 
   //
   // Store set of Blocks that need to be replicated 1 or more times.
   // We also store pending replication-orders.
   //
-  UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+  public UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
   private PendingReplicationBlocks pendingReplications;
 
   // The maximum number of replicas allowed for a block
-  int maxReplication;
+  public int maxReplication;
   // How many outgoing replication streams a given node should have at one time
-  int maxReplicationStreams;
+  public int maxReplicationStreams;
   // Minimum copies needed or else write is disallowed
-  int minReplication;
+  public int minReplication;
   // Default number of replicas
-  int defaultReplication;
+  public int defaultReplication;
   // How many entries are returned by getCorruptInodes()
   int maxCorruptFilesReturned;
@@ -121,9 +150,9 @@ public class BlockManager {
   Random r = new Random();
 
   // for block replicas placement
-  BlockPlacementPolicy replicator;
+  public BlockPlacementPolicy replicator;
 
-  BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
+  public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
     this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY);
   }
@@ -178,16 +207,16 @@ public class BlockManager {
     FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
   }
 
-  void activate() {
+  public void activate() {
     pendingReplications.start();
   }
 
-  void close() {
+  public void close() {
     if (pendingReplications != null) pendingReplications.stop();
     blocksMap.close();
   }
 
-  void metaSave(PrintWriter out) {
+  public void metaSave(PrintWriter out) {
     //
     // Dump contents of neededReplication
     //
@@ -249,7 +278,7 @@ public class BlockManager {
    * @param block
    * @return true if the block has minimum replicas
    */
-  boolean checkMinReplication(Block block) {
+  public boolean checkMinReplication(Block block) {
     return (countNodes(block).liveReplicas() >= minReplication);
   }
@@ -297,7 +326,7 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
+  public void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
       Block commitBlock) throws IOException {
     if(commitBlock == null)
@@ -362,7 +391,7 @@ public class BlockManager {
    * @param fileINode file
    * @return the last block locations if the block is partial or null otherwise
    */
-  LocatedBlock convertLastBlockToUnderConstruction(
+  public LocatedBlock convertLastBlockToUnderConstruction(
      INodeFileUnderConstruction fileINode) throws IOException {
     BlockInfo oldBlock = fileINode.getLastBlock();
     if(oldBlock == null ||
@@ -393,7 +422,7 @@ public class BlockManager {
   /**
    * Get all valid locations of the block
    */
-  ArrayList<String> getValidLocations(Block block) {
+  public ArrayList<String> getValidLocations(Block block) {
     ArrayList<String> machineSet =
       new ArrayList<String>(blocksMap.numNodes(block));
     for(Iterator<DatanodeDescriptor> it =
@@ -407,7 +436,7 @@ public class BlockManager {
     return machineSet;
   }
 
-  List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
+  public List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
       long length, int nrBlocksToReturn) throws IOException {
     int curBlk = 0;
     long curPos = 0, blkSize = 0;
@@ -436,11 +465,15 @@
     return results;
   }
 
-  /** @param needBlockToken
-   * @return a LocatedBlock for the given block */
-  LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
+  /** @return a LocatedBlock for the given block */
+  public LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
       ) throws IOException {
-    if (!blk.isComplete()) {
+    if (blk instanceof BlockInfoUnderConstruction) {
+      if (blk.isComplete()) {
+        throw new IOException(
+            "blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
+            + ", blk=" + blk);
+      }
       final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk;
       final DatanodeDescriptor[] locations = uc.getExpectedLocations();
       return namesystem.createLocatedBlock(uc, locations, pos, false);
@@ -476,7 +509,7 @@ public class BlockManager {
    * Check whether the replication parameter is within the range
    * determined by system configuration.
    */
-  void verifyReplication(String src,
+  public void verifyReplication(String src,
                          short replication,
                          String clientName) throws IOException {
@@ -544,7 +577,7 @@ public class BlockManager {
    * @param b block
    * @param dn datanode
    */
-  void addToInvalidates(Block b, DatanodeInfo dn) {
+  public void addToInvalidates(Block b, DatanodeInfo dn) {
     addToInvalidates(b, dn, true);
   }
@@ -585,7 +618,7 @@ public class BlockManager {
     }
   }
 
-  void findAndMarkBlockAsCorrupt(Block blk,
+  public void findAndMarkBlockAsCorrupt(Block blk,
                                  DatanodeInfo dn) throws IOException {
     BlockInfo storedBlock = getStoredBlock(blk);
     if (storedBlock == null) {
@@ -668,14 +701,14 @@ public class BlockManager {
     }
   }
 
-  void updateState() {
+  public void updateState() {
     pendingReplicationBlocksCount = pendingReplications.size();
     underReplicatedBlocksCount = neededReplications.size();
     corruptReplicaBlocksCount = corruptReplicas.size();
   }
 
   /** Return number of under-replicated but not missing blocks */
-  int getUnderReplicatedNotMissingBlocks() {
+  public int getUnderReplicatedNotMissingBlocks() {
     return neededReplications.getUnderReplicatedBlockCount();
   }
@@ -684,7 +717,7 @@ public class BlockManager {
    * @param nodesToProcess number of datanodes to schedule deletion work
    * @return total number of block for deletion
    */
-  int computeInvalidateWork(int nodesToProcess) {
+  public int computeInvalidateWork(int nodesToProcess) {
     int numOfNodes = recentInvalidateSets.size();
     nodesToProcess = Math.min(numOfNodes, nodesToProcess);
@@ -724,7 +757,7 @@ public class BlockManager {
    *
    * @return number of blocks scheduled for replication during this iteration.
    */
-  int computeReplicationWork(int blocksToProcess) throws IOException {
+  public int computeReplicationWork(int blocksToProcess) throws IOException {
     // Choose the blocks to be replicated
     List<List<Block>> blocksToReplicate =
       chooseUnderReplicatedBlocks(blocksToProcess);
@@ -1031,7 +1064,7 @@ public class BlockManager {
    * If there were any replication requests that timed out, reap them
    * and put them back into the neededReplication queue
    */
-  void processPendingReplications() {
+  public void processPendingReplications() {
     Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();
@@ -1464,7 +1497,7 @@ public class BlockManager {
     short fileReplication = fileINode.getReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
-          num.decommissionedReplicas, fileReplication);
+          num.decommissionedReplicas(), fileReplication);
     } else {
       updateNeededReplications(storedBlock, curReplicaDelta, 0);
     }
@@ -1525,7 +1558,7 @@ public class BlockManager {
    * For each block in the name-node verify whether it belongs to any file,
    * over or under replicated. Place it into the respective queue.
    */
-  void processMisReplicatedBlocks() {
+  public void processMisReplicatedBlocks() {
     long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0;
     namesystem.writeLock();
     try {
@@ -1570,7 +1603,7 @@ public class BlockManager {
    * If there are any extras, call chooseExcessReplicates() to
    * mark them in the excessReplicateMap.
    */
-  void processOverReplicatedBlock(Block block, short replication,
+  public void processOverReplicatedBlock(Block block, short replication,
      DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
    assert namesystem.hasWriteLock();
    if (addedNode == delNodeHint) {
@@ -1597,7 +1630,7 @@ public class BlockManager {
         addedNode, delNodeHint, replicator);
   }
 
-  void addToExcessReplicate(DatanodeInfo dn, Block block) {
+  public void addToExcessReplicate(DatanodeInfo dn, Block block) {
     assert namesystem.hasWriteLock();
     Collection<Block> excessBlocks = excessReplicateMap.get(dn.getStorageID());
     if (excessBlocks == null) {
@@ -1618,7 +1651,7 @@ public class BlockManager {
    * Modify (block-->datanode) map. Possibly generate replication tasks, if the
    * removed block is still valid.
    */
-  void removeStoredBlock(Block block, DatanodeDescriptor node) {
+  public void removeStoredBlock(Block block, DatanodeDescriptor node) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
           + block + " from " + node.getName());
@@ -1673,7 +1706,7 @@ public class BlockManager {
   /**
    * The given node is reporting that it received a certain block.
    */
-  void addBlock(DatanodeDescriptor node, Block block, String delHint)
+  public void addBlock(DatanodeDescriptor node, Block block, String delHint)
       throws IOException {
     // decrement number of blocks scheduled to this datanode.
     node.decBlocksScheduled();
@@ -1726,7 +1759,7 @@ public class BlockManager {
   /**
    * Return the number of nodes that are live and decommissioned.
    */
-  NumberReplicas countNodes(Block b) {
+  public NumberReplicas countNodes(Block b) {
     int count = 0;
     int live = 0;
     int corrupt = 0;
@@ -1805,7 +1838,7 @@ public class BlockManager {
    * Return true if there are any blocks on this node that have not
    * yet reached their replication factor. Otherwise returns false.
    */
-  boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
+  public boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
     boolean status = false;
     int underReplicatedBlocks = 0;
     int decommissionOnlyReplicas = 0;
@@ -1855,11 +1888,11 @@ public class BlockManager {
     return status;
   }
 
-  int getActiveBlockCount() {
+  public int getActiveBlockCount() {
     return blocksMap.size() - (int)pendingDeletionBlocksCount;
   }
 
-  DatanodeDescriptor[] getNodes(BlockInfo block) {
+  public DatanodeDescriptor[] getNodes(BlockInfo block) {
     DatanodeDescriptor[] nodes =
       new DatanodeDescriptor[block.numNodes()];
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
@@ -1869,22 +1902,22 @@ public class BlockManager {
     return nodes;
   }
 
-  int getTotalBlocks() {
+  public int getTotalBlocks() {
     return blocksMap.size();
   }
 
-  void removeBlock(Block block) {
+  public void removeBlock(Block block) {
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
   }
 
-  BlockInfo getStoredBlock(Block block) {
+  public BlockInfo getStoredBlock(Block block) {
     return blocksMap.getStoredBlock(block);
   }
 
   /* updates a block in under replication queue */
-  void updateNeededReplications(Block block, int curReplicasDelta,
+  public void updateNeededReplications(Block block, int curReplicasDelta,
       int expectedReplicasDelta) {
     namesystem.writeLock();
     try {
@@ -1905,13 +1938,13 @@ public class BlockManager {
     }
   }
 
-  void checkReplication(Block block, int numExpectedReplicas) {
+  public void checkReplication(Block block, int numExpectedReplicas) {
     // filter out containingNodes that are marked for decommission.
     NumberReplicas number = countNodes(block);
     if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
       neededReplications.add(block,
                              number.liveReplicas(),
-                             number.decommissionedReplicas,
+                             number.decommissionedReplicas(),
                              numExpectedReplicas);
     }
   }
@@ -1926,11 +1959,8 @@ public class BlockManager {
     return fileINode.getReplication();
   }
 
-  /**
-   * Remove a datanode from the invalidatesSet
-   * @param n datanode
-   */
-  void removeFromInvalidates(String storageID) {
+  /** Remove a datanode from the invalidatesSet */
+  public void removeFromInvalidates(String storageID) {
     Collection<Block> blocks = recentInvalidateSets.remove(storageID);
     if (blocks != null) {
       pendingDeletionBlocksCount -= blocks.size();
@@ -1998,7 +2028,7 @@ public class BlockManager {
   //Returns the number of racks over which a given block is replicated
   //decommissioning/decommissioned nodes are not counted. corrupt replicas
   //are also ignored
-  int getNumberOfRacks(Block b) {
+  public int getNumberOfRacks(Block b) {
     HashSet<String> rackSet = new HashSet<String>(0);
     Collection<DatanodeDescriptor> corruptNodes =
       corruptReplicas.getNodes(b);
@@ -2056,32 +2086,32 @@ public class BlockManager {
     }
   }
 
-  long getMissingBlocksCount() {
+  public long getMissingBlocksCount() {
     // not locking
     return this.neededReplications.getCorruptBlockSize();
   }
 
-  BlockInfo addINode(BlockInfo block, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
     return blocksMap.addINode(block, iNode);
   }
 
-  INodeFile getINode(Block b) {
+  public INodeFile getINode(Block b) {
     return blocksMap.getINode(b);
   }
 
-  void removeFromCorruptReplicasMap(Block block) {
+  public void removeFromCorruptReplicasMap(Block block) {
     corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
-  int numCorruptReplicas(Block block) {
+  public int numCorruptReplicas(Block block) {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  void removeBlockFromMap(Block block) {
+  public void removeBlockFromMap(Block block) {
     blocksMap.removeBlock(block);
   }
 
-  int getCapacity() {
+  public int getCapacity() {
     namesystem.readLock();
     try {
       return blocksMap.getCapacity();
@@ -2104,7 +2134,7 @@ public class BlockManager {
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    *
    */
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  public long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
                                    Long startingBlockId) {
     return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
         startingBlockId);
@@ -2113,7 +2143,7 @@ public class BlockManager {
   /**
    * Return an iterator over the set of blocks for which there are no replicas.
    */
-  UnderReplicatedBlocks.BlockIterator getCorruptReplicaBlockIterator() {
+  public BlockIterator getCorruptReplicaBlockIterator() {
     return neededReplications
         .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
   }
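The replication counters at the top of BlockManager are now private, so callers such as the namenode's metrics code read them through the new accessors instead of touching the fields. A small illustrative helper (not part of the patch) that snapshots those counters using only methods shown in the hunks above:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

class BlockManagerMetricsSketch {
  /** Refresh and read the replication-related counters via the new public accessors. */
  static long[] snapshot(BlockManager bm) {
    bm.updateState();  // recompute pending/under-replicated/corrupt counts
    return new long[] {
        bm.getPendingReplicationBlocksCount(),
        bm.getUnderReplicatedBlocksCount(),
        bm.getCorruptReplicaBlocksCount(),
        bm.getScheduledReplicationBlocksCount(),
        bm.getPendingDeletionBlocksCount(),
        bm.getExcessBlocksCount()
    };
  }
}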

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -26,6 +26,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
+import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -94,7 +96,7 @@ public abstract class BlockPlacementPolicy {
    * @return array of DatanodeDescriptor instances chosen as target
    * and sorted as a pipeline.
    */
-  abstract DatanodeDescriptor[] chooseTarget(String srcPath,
+  public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
                                              int numOfReplicas,
                                              DatanodeDescriptor writer,
                                              List<DatanodeDescriptor> chosenNodes,
@@ -222,11 +224,11 @@ public abstract class BlockPlacementPolicy {
    * @param numOfReplicas number of replicas wanted.
    * @param writer the writer's machine, null if not in the cluster.
    * @param blocksize size of the data to be written.
-   * @param excludedNodes: datanodes that should not be considered as targets.
+   * @param excludedNodes datanodes that should not be considered as targets.
    * @return array of DatanodeDescriptor instances chosen as targets
    * and sorted as a pipeline.
    */
-  DatanodeDescriptor[] chooseTarget(String srcPath,
+  public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     HashMap<Node, Node> excludedNodes,
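Because the placement-policy classes now live in o.a.h.h.s.blockmanagement with public chooseTarget methods, code outside the namenode package can refer to them directly. A trivial illustrative check, not from the patch, that uses only the new package names:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;

class PolicyCheckSketch {
  /** Illustrative: report whether the configured replicator is still the default policy. */
  static boolean isDefaultPolicy(BlockPlacementPolicy policy) {
    return policy instanceof BlockPlacementPolicyDefault;
  }
}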

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -32,6 +32,9 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
+import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -89,7 +92,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   /** {@inheritDoc} */
   @Override
-  DatanodeDescriptor[] chooseTarget(String srcPath,
+  public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     List<DatanodeDescriptor> chosenNodes,

View File

@@ -15,11 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.Iterator;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.GSet;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
  * block's metadata currently includes INode it belongs to and
  * the datanodes that store the block.
  */
-class BlocksMap {
+public class BlocksMap {
   private static class NodeIterator implements Iterator<DatanodeDescriptor> {
     private BlockInfo blockInfo;
     private int nextIdx = 0;
@@ -100,7 +101,7 @@ class BlocksMap {
   /**
    * Add block b belonging to the specified file inode to the map.
   */
-  BlockInfo addINode(BlockInfo b, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo b, INodeFile iNode) {
     BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;
@@ -136,7 +137,7 @@ class BlocksMap {
    * Searches for the block in the BlocksMap and
    * returns Iterator that iterates through the nodes the block belongs to.
   */
-  Iterator<DatanodeDescriptor> nodeIterator(Block b) {
+  public Iterator<DatanodeDescriptor> nodeIterator(Block b) {
     return nodeIterator(blocks.get(b));
   }
@@ -185,7 +186,7 @@ class BlocksMap {
   /**
    * Check if the block exists in map
   */
-  boolean contains(Block block) {
+  public boolean contains(Block block) {
     return blocks.contains(block);
   }
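With BlocksMap's lookup methods now public, blockmanagement code can enumerate a block's replica locations without going through FSNamesystem. A hedged sketch using only the methods shown above (contains and nodeIterator); printHolders itself is illustrative, not part of the patch:

import java.util.Iterator;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

class BlocksMapSketch {
  /** Illustrative: list the datanodes currently holding a block. */
  static void printHolders(BlocksMap map, Block b) {
    if (!map.contains(b)) {
      System.out.println(b + " is not in the map");
      return;
    }
    for (Iterator<DatanodeDescriptor> it = map.nodeIterator(b); it.hasNext();) {
      System.out.println(it.next().getName());
    }
  }
}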

View File

@@ -15,10 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
 import java.util.*;

View File

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.DataInput;
 import java.io.IOException;
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
@@ -44,7 +45,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
 
   /** Block and targets pair */
   @InterfaceAudience.Private
@@ -96,8 +97,8 @@
   private int numBlocks = 0;
   // isAlive == heartbeats.contains(this)
   // This is an optimization, because contains takes O(n) time on Arraylist
-  protected boolean isAlive = false;
-  protected boolean needKeyUpdate = false;
+  public boolean isAlive = false;
+  public boolean needKeyUpdate = false;
 
   /** A queue of blocks to be replicated by this datanode */
   private BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
@@ -204,7 +205,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Add datanode to the block.
    * Add block to the head of the list of blocks belonging to the data-node.
    */
-  boolean addBlock(BlockInfo b) {
+  public boolean addBlock(BlockInfo b) {
     if(!b.addNode(this))
       return false;
     // add to the head of the data-node list
@@ -217,7 +218,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Remove block from the list of blocks belonging to the data-node.
    * Remove datanode from the block.
    */
-  boolean removeBlock(BlockInfo b) {
+  public boolean removeBlock(BlockInfo b) {
     blockList = b.listRemove(blockList, this);
     if ( b.removeNode(this) ) {
       numBlocks--;
@@ -242,7 +243,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param newBlock - a replacement block
    * @return the new block
    */
-  BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
+  public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
     boolean done = removeBlock(oldBlock);
     assert done : "Old block should belong to the data-node when replacing";
     done = addBlock(newBlock);
@@ -250,7 +251,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     return newBlock;
   }
 
-  void resetBlocks() {
+  public void resetBlocks() {
     this.capacity = 0;
     this.remaining = 0;
     this.blockPoolUsed = 0;
@@ -268,7 +269,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Updates stats from datanode heartbeat.
    */
-  void updateHeartbeat(long capacity, long dfsUsed, long remaining,
+  public void updateHeartbeat(long capacity, long dfsUsed, long remaining,
      long blockPoolUsed, int xceiverCount, int volFailures) {
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
@@ -283,7 +284,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Iterates over the list of blocks belonging to the datanode.
    */
-  static class BlockIterator implements Iterator<BlockInfo> {
+  public static class BlockIterator implements Iterator<BlockInfo> {
     private BlockInfo current;
     private DatanodeDescriptor node;
@@ -307,7 +308,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
-  Iterator<BlockInfo> getBlockIterator() {
+  public Iterator<BlockInfo> getBlockIterator() {
     return new BlockIterator(this.blockList, this);
   }
@@ -361,11 +362,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
-  List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
+  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
     return replicateBlocks.poll(maxTransfers);
   }
 
-  BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
+  public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
     List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
     if(blocks == null)
       return null;
@@ -375,7 +376,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Remove the specified number of blocks to be invalidated
    */
-  Block[] getInvalidateBlocks(int maxblocks) {
+  public Block[] getInvalidateBlocks(int maxblocks) {
     return getBlockArray(invalidateBlocks, maxblocks);
   }
@@ -418,7 +419,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /** Serialization for FSEditLog */
-  void readFieldsFromFSEditLog(DataInput in) throws IOException {
+  public void readFieldsFromFSEditLog(DataInput in) throws IOException {
     this.name = DeprecatedUTF8.readString(in);
     this.storageID = DeprecatedUTF8.readString(in);
     this.infoPort = in.readShort() & 0x0000ffff;
@@ -445,7 +446,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Increments counter for number of blocks scheduled.
    */
-  void incBlocksScheduled() {
+  public void incBlocksScheduled() {
     currApproxBlocksScheduled++;
   }
@@ -486,11 +487,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
     return (this == obj) || super.equals(obj);
   }
 
-  class DecommissioningStatus {
-    int underReplicatedBlocks;
-    int decommissionOnlyReplicas;
-    int underReplicatedInOpenFiles;
-    long startTime;
+  /** Decommissioning status */
+  public class DecommissioningStatus {
+    private int underReplicatedBlocks;
+    private int decommissionOnlyReplicas;
+    private int underReplicatedInOpenFiles;
+    private long startTime;
 
     synchronized void set(int underRep,
         int onlyRep, int underConstruction) {
@@ -502,31 +504,33 @@ public class DatanodeDescriptor extends DatanodeInfo {
       underReplicatedInOpenFiles = underConstruction;
     }
 
-    synchronized int getUnderReplicatedBlocks() {
+    /** @return the number of under-replicated blocks */
+    public synchronized int getUnderReplicatedBlocks() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
       return underReplicatedBlocks;
     }
-    synchronized int getDecommissionOnlyReplicas() {
+    /** @return the number of decommission-only replicas */
+    public synchronized int getDecommissionOnlyReplicas() {
      if (isDecommissionInProgress() == false) {
        return 0;
      }
      return decommissionOnlyReplicas;
    }
-    synchronized int getUnderReplicatedInOpenFiles() {
+    /** @return the number of under-replicated blocks in open files */
+    public synchronized int getUnderReplicatedInOpenFiles() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
       return underReplicatedInOpenFiles;
     }
-    synchronized void setStartTime(long time) {
+    /** Set start time */
+    public synchronized void setStartTime(long time) {
       startTime = time;
     }
-    synchronized long getStartTime() {
+    /** @return start time */
+    public synchronized long getStartTime() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
@@ -538,11 +542,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Set the flag to indicate if this datanode is disallowed from communicating
    * with the namenode.
    */
-  void setDisallowed(boolean flag) {
+  public void setDisallowed(boolean flag) {
     disallowed = flag;
   }
 
-  boolean isDisallowed() {
+  /** Is the datanode disallowed from communicating with the namenode? */
+  public boolean isDisallowed() {
     return disallowed;
   }
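Several DatanodeDescriptor members above are now public, so blockmanagement code can inspect a datanode's state without namenode-package access. A hedged sketch using only the members made public in these hunks; both helpers are illustrative, not part of the patch:

import java.util.Iterator;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

class DatanodeSketch {
  /** Illustrative: count a datanode's blocks via the now-public block iterator. */
  static int countBlocks(DatanodeDescriptor dn) {
    int n = 0;
    for (Iterator<BlockInfo> it = dn.getBlockIterator(); it.hasNext(); it.next()) {
      n++;
    }
    return n;
  }

  /** Illustrative: read decommissioning progress through the public accessors. */
  static int underReplicatedWhileDecommissioning(DatanodeDescriptor dn) {
    return dn.decommissioningStatus.getUnderReplicatedBlocks();
  }
}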

View File

@ -0,0 +1,57 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
/**
* A immutable object that stores the number of live replicas and
* the number of decommissined Replicas.
*/
public class NumberReplicas {
private int liveReplicas;
private int decommissionedReplicas;
private int corruptReplicas;
private int excessReplicas;
NumberReplicas() {
initialize(0, 0, 0, 0);
}
NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
initialize(live, decommissioned, corrupt, excess);
}
void initialize(int live, int decommissioned, int corrupt, int excess) {
liveReplicas = live;
decommissionedReplicas = decommissioned;
corruptReplicas = corrupt;
excessReplicas = excess;
}
public int liveReplicas() {
return liveReplicas;
}
public int decommissionedReplicas() {
return decommissionedReplicas;
}
public int corruptReplicas() {
return corruptReplicas;
}
public int excessReplicas() {
return excessReplicas;
}
}
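
NumberReplicas is the new value object extracted from FSNamesystem (its old copy is deleted further down in this commit). A minimal consumer sketch, assuming only the public accessors shown above; the classify helper and its return strings are hypothetical.

import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;

public class ReplicationStateSketch {
  // Hypothetical helper: classify a block from a NumberReplicas snapshot.
  // Only the accessors called here come from the new class above.
  static String classify(NumberReplicas num, short expectedReplication) {
    int live = num.liveReplicas();
    if (live == 0 && num.decommissionedReplicas() == 0) {
      return num.corruptReplicas() > 0 ? "corrupt" : "missing";
    }
    if (live < expectedReplication) {
      return "under-replicated (" + live + "/" + expectedReplication + ")";
    }
    if (num.excessReplicas() > 0) {
      return "over-replicated";
    }
    return "healthy";
  }
}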

View File

@ -15,9 +15,11 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import static org.apache.hadoop.hdfs.server.common.Util.now; import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.util.*; import org.apache.hadoop.util.*;
import java.io.*; import java.io.*;

View File

@ -15,17 +15,18 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.*; import java.util.*;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/* Class for keeping track of under replication blocks /* Class for keeping track of under replication blocks
* Blocks have replication priority, with priority 0 indicating the highest * Blocks have replication priority, with priority 0 indicating the highest
* Blocks that have only one replica have the highest priority * Blocks that have only one replica have the highest priority
*/ */
class UnderReplicatedBlocks implements Iterable<Block> { public class UnderReplicatedBlocks implements Iterable<Block> {
static final int LEVEL = 5; static final int LEVEL = 5;
static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4; static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>(); private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
@ -47,7 +48,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
} }
/* Return the total number of under replication blocks */ /* Return the total number of under replication blocks */
synchronized int size() { public synchronized int size() {
int size = 0; int size = 0;
for (int i=0; i<LEVEL; i++) { for (int i=0; i<LEVEL; i++) {
size += priorityQueues.get(i).size(); size += priorityQueues.get(i).size();
@ -70,7 +71,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
} }
/* Check if a block is in the neededReplication queue */ /* Check if a block is in the neededReplication queue */
synchronized boolean contains(Block block) { public synchronized boolean contains(Block block) {
for(TreeSet<Block> set:priorityQueues) { for(TreeSet<Block> set:priorityQueues) {
if(set.contains(block)) { return true; } if(set.contains(block)) { return true; }
} }
@ -218,7 +219,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
return new BlockIterator(); return new BlockIterator();
} }
class BlockIterator implements Iterator<Block> { public class BlockIterator implements Iterator<Block> {
private int level; private int level;
private boolean isIteratorForLevel = false; private boolean isIteratorForLevel = false;
private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>(); private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
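
UnderReplicatedBlocks keeps LEVEL = 5 priority queues, with priority 0 the most urgent (a block down to a single replica) and queue 4 reserved for corrupt blocks. The sketch below is a simplified approximation of how a level could be chosen; it is not the class's actual getPriority logic, only an illustration of the queue layout under stated assumptions.

public class ReplicationPrioritySketch {
  static final int LEVEL = 5;                     // number of queues, as above
  static final int QUEUE_WITH_CORRUPT_BLOCKS = 4; // lowest-priority queue

  // Simplified approximation only, NOT the real UnderReplicatedBlocks logic:
  // 0 is the most urgent queue, 4 holds blocks with no usable replica.
  static int priorityLevel(int liveReplicas, int decommissionedReplicas,
      int expectedReplicas) {
    if (liveReplicas <= 0 && decommissionedReplicas <= 0) {
      return QUEUE_WITH_CORRUPT_BLOCKS;   // no usable replica left
    } else if (liveReplicas == 1) {
      return 0;                           // a single replica: highest priority
    } else if (liveReplicas * 3 < expectedReplicas) {
      return 1;                           // badly under-replicated
    } else if (liveReplicas < expectedReplicas) {
      return 2;                           // moderately under-replicated
    } else {
      return 3;                           // remaining cases, lowest normal queue
    }
  }
}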

View File

@ -26,8 +26,8 @@ import java.net.InetSocketAddress;
import java.net.Socket; import java.net.Socket;
import java.net.URL; import java.net.URL;
import java.net.URLEncoder; import java.net.URLEncoder;
import java.util.Arrays;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashMap; import java.util.HashMap;
@ -45,14 +45,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;

View File

@ -21,6 +21,7 @@ import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.util.CyclicIteration; import org.apache.hadoop.util.CyclicIteration;
/** /**

View File

@ -17,6 +17,8 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.Closeable; import java.io.Closeable;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
@ -24,35 +26,40 @@ import java.net.URI;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSLimitException; import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.*; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.util.ByteArray; import org.apache.hadoop.hdfs.util.ByteArray;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.DFSConfigKeys;
/************************************************* /*************************************************
* FSDirectory stores the filesystem directory state. * FSDirectory stores the filesystem directory state.
@ -63,7 +70,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
* and logged to disk. * and logged to disk.
* *
*************************************************/ *************************************************/
class FSDirectory implements Closeable { public class FSDirectory implements Closeable {
INodeDirectoryWithQuota rootDir; INodeDirectoryWithQuota rootDir;
FSImage fsImage; FSImage fsImage;
@ -1332,7 +1339,7 @@ class FSDirectory implements Closeable {
* @throws QuotaExceededException if the new count violates any quota limit * @throws QuotaExceededException if the new count violates any quota limit
* @throws FileNotFound if path does not exist. * @throws FileNotFound if path does not exist.
*/ */
void updateSpaceConsumed(String path, long nsDelta, long dsDelta) public void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
throws QuotaExceededException, throws QuotaExceededException,
FileNotFoundException, FileNotFoundException,
UnresolvedLinkException { UnresolvedLinkException {

View File

@ -17,6 +17,8 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.BufferedInputStream; import java.io.BufferedInputStream;
import java.io.DataInputStream; import java.io.DataInputStream;
import java.io.EOFException; import java.io.EOFException;
@ -32,10 +34,30 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import static org.apache.hadoop.hdfs.server.common.Util.now; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
public class FSEditLogLoader { public class FSEditLogLoader {
private final FSNamesystem fsNamesys; private final FSNamesystem fsNamesys;

View File

@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.BytesWritable;

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;

View File

@ -22,7 +22,6 @@ import java.io.DataInputStream;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -33,6 +32,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;

View File

@ -97,15 +97,20 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@ -235,7 +240,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
// Stores the correct file name hierarchy // Stores the correct file name hierarchy
// //
public FSDirectory dir; public FSDirectory dir;
BlockManager blockManager; public BlockManager blockManager;
// Block pool ID used by this namenode // Block pool ID used by this namenode
String blockPoolId; String blockPoolId;
@ -270,10 +275,10 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
* Stores a set of DatanodeDescriptor objects. * Stores a set of DatanodeDescriptor objects.
* This is a subset of {@link #datanodeMap}, containing nodes that are * This is a subset of {@link #datanodeMap}, containing nodes that are
* considered alive. * considered alive.
* The {@link HeartbeatMonitor} periodically checks for outdated entries, * The HeartbeatMonitor periodically checks for out-dated entries,
* and removes them from the list. * and removes them from the list.
*/ */
ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>(); public ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>();
public LeaseManager leaseManager = new LeaseManager(this); public LeaseManager leaseManager = new LeaseManager(this);
@ -314,8 +319,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
private volatile SafeModeInfo safeMode; // safe mode information private volatile SafeModeInfo safeMode; // safe mode information
private Host2NodesMap host2DataNodeMap = new Host2NodesMap(); private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
// datanode networktoplogy /** datanode network topology */
NetworkTopology clusterMap = new NetworkTopology(); public NetworkTopology clusterMap = new NetworkTopology();
private DNSToSwitchMapping dnsToSwitchMapping; private DNSToSwitchMapping dnsToSwitchMapping;
private HostsFileReader hostsReader; private HostsFileReader hostsReader;
@ -329,7 +334,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
private final GenerationStamp generationStamp = new GenerationStamp(); private final GenerationStamp generationStamp = new GenerationStamp();
// Ask Datanode only up to this many blocks to delete. // Ask Datanode only up to this many blocks to delete.
int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT; public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
// precision of access times. // precision of access times.
private long accessTimePrecision = 0; private long accessTimePrecision = 0;
@ -472,23 +477,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
} }
// utility methods to acquire and release read lock and write lock // utility methods to acquire and release read lock and write lock
void readLock() { public void readLock() {
this.fsLock.readLock().lock(); this.fsLock.readLock().lock();
} }
void readUnlock() { public void readUnlock() {
this.fsLock.readLock().unlock(); this.fsLock.readLock().unlock();
} }
void writeLock() { public void writeLock() {
this.fsLock.writeLock().lock(); this.fsLock.writeLock().lock();
} }
void writeUnlock() { public void writeUnlock() {
this.fsLock.writeLock().unlock(); this.fsLock.writeLock().unlock();
} }
boolean hasWriteLock() { public boolean hasWriteLock() {
return this.fsLock.isWriteLockedByCurrentThread(); return this.fsLock.isWriteLockedByCurrentThread();
} }
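
The read and write lock helpers become public here so the relocated block-management code can take the namesystem lock. A minimal usage sketch with the usual try/finally discipline; the helper method is hypothetical, and whether this particular field needs synchronization beyond the read lock is an assumption made only for the example.

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class NamesystemLockSketch {
  // Hypothetical helper showing the try/finally pattern with the now-public
  // readLock()/readUnlock() pair.
  static int liveDatanodeCount(FSNamesystem namesystem) {
    namesystem.readLock();
    try {
      return namesystem.heartbeats.size(); // heartbeats is public after this patch
    } finally {
      namesystem.readUnlock();
    }
  }
}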
@ -1014,7 +1019,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
} }
/** Create a LocatedBlock. */ /** Create a LocatedBlock. */
LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations, public LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
final long offset, final boolean corrupt) throws IOException { final long offset, final boolean corrupt) throws IOException {
return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt); return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt);
} }
@ -3013,7 +3018,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
* @return an array of datanode commands * @return an array of datanode commands
* @throws IOException * @throws IOException
*/ */
DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
long capacity, long dfsUsed, long remaining, long blockPoolUsed, long capacity, long dfsUsed, long remaining, long blockPoolUsed,
int xceiverCount, int xmitsInProgress, int failedVolumes) int xceiverCount, int xmitsInProgress, int failedVolumes)
throws IOException { throws IOException {
@ -3521,7 +3526,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
* If no such node is available, * If no such node is available,
* then pick a node with least free space * then pick a node with least free space
*/ */
void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, public void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
Block b, short replication, Block b, short replication,
DatanodeDescriptor addedNode, DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint, DatanodeDescriptor delNodeHint,
@ -3979,45 +3984,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
return replication; return replication;
} }
/**
* A immutable object that stores the number of live replicas and
* the number of decommissined Replicas.
*/
static class NumberReplicas {
private int liveReplicas;
int decommissionedReplicas;
private int corruptReplicas;
private int excessReplicas;
NumberReplicas() {
initialize(0, 0, 0, 0);
}
NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
initialize(live, decommissioned, corrupt, excess);
}
void initialize(int live, int decommissioned, int corrupt, int excess) {
liveReplicas = live;
decommissionedReplicas = decommissioned;
corruptReplicas = corrupt;
excessReplicas = excess;
}
int liveReplicas() {
return liveReplicas;
}
int decommissionedReplicas() {
return decommissionedReplicas;
}
int corruptReplicas() {
return corruptReplicas;
}
int excessReplicas() {
return excessReplicas;
}
}
/** /**
* Change, if appropriate, the admin state of a datanode to * Change, if appropriate, the admin state of a datanode to
* decommission completed. Return true if decommission is complete. * decommission completed. Return true if decommission is complete.
@ -4675,7 +4641,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
* Check whether the name node is in safe mode. * Check whether the name node is in safe mode.
* @return true if safe mode is ON, false otherwise * @return true if safe mode is ON, false otherwise
*/ */
boolean isInSafeMode() { public boolean isInSafeMode() {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) if (safeMode == null)
@ -4686,7 +4652,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
/** /**
* Check whether the name node is in startup mode. * Check whether the name node is in startup mode.
*/ */
boolean isInStartupSafeMode() { public boolean isInStartupSafeMode() {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) if (safeMode == null)
@ -4697,7 +4663,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
/** /**
* Check whether replication queues are populated. * Check whether replication queues are populated.
*/ */
boolean isPopulatingReplQueues() { public boolean isPopulatingReplQueues() {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) if (safeMode == null)
@ -4709,7 +4675,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
* Increment number of blocks that reached minimal replication. * Increment number of blocks that reached minimal replication.
* @param replication current replication * @param replication current replication
*/ */
void incrementSafeBlockCount(int replication) { public void incrementSafeBlockCount(int replication) {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) if (safeMode == null)
@ -4720,7 +4686,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
/** /**
* Decrement number of blocks that reached minimal replication. * Decrement number of blocks that reached minimal replication.
*/ */
void decrementSafeBlockCount(Block b) { public void decrementSafeBlockCount(Block b) {
// safeMode is volatile, and may be set to null at any time // safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode; SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) // mostly true if (safeMode == null) // mostly true
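
The safe-mode checks above all repeat the same idiom: copy the volatile safeMode reference into a local before testing it for null, so the test and the subsequent use see the same object even if another thread clears the field. A generic sketch of just that idiom, with placeholder names:

public class VolatileCopySketch {
  private static class SafeModeInfo {       // placeholder for the real type
    boolean isOn() { return true; }
  }

  private volatile SafeModeInfo safeMode;   // may be set to null at any time

  boolean isInSafeMode() {
    // Read the volatile field once; the local keeps the null check and the
    // call working on the same object even if another thread clears the field.
    SafeModeInfo safeMode = this.safeMode;
    if (safeMode == null) {
      return false;
    }
    return safeMode.isOn();
  }
}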
@ -5042,13 +5008,13 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
@Metric @Metric
public long getPendingReplicationBlocks() { public long getPendingReplicationBlocks() {
return blockManager.pendingReplicationBlocksCount; return blockManager.getPendingReplicationBlocksCount();
} }
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
@Metric @Metric
public long getUnderReplicatedBlocks() { public long getUnderReplicatedBlocks() {
return blockManager.underReplicatedBlocksCount; return blockManager.getUnderReplicatedBlocksCount();
} }
/** Return number of under-replicated but not missing blocks */ /** Return number of under-replicated but not missing blocks */
@ -5059,23 +5025,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
/** Returns number of blocks with corrupt replicas */ /** Returns number of blocks with corrupt replicas */
@Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"}) @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
public long getCorruptReplicaBlocks() { public long getCorruptReplicaBlocks() {
return blockManager.corruptReplicaBlocksCount; return blockManager.getCorruptReplicaBlocksCount();
} }
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
@Metric @Metric
public long getScheduledReplicationBlocks() { public long getScheduledReplicationBlocks() {
return blockManager.scheduledReplicationBlocksCount; return blockManager.getScheduledReplicationBlocksCount();
} }
@Metric @Metric
public long getPendingDeletionBlocks() { public long getPendingDeletionBlocks() {
return blockManager.pendingDeletionBlocksCount; return blockManager.getPendingDeletionBlocksCount();
} }
@Metric @Metric
public long getExcessBlocks() { public long getExcessBlocks() {
return blockManager.excessBlocksCount; return blockManager.getExcessBlocksCount();
} }
@Metric @Metric
@ -5444,7 +5410,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
} }
/** Get a datanode descriptor given corresponding storageID */ /** Get a datanode descriptor given corresponding storageID */
DatanodeDescriptor getDatanode(String nodeID) { public DatanodeDescriptor getDatanode(String nodeID) {
assert hasReadOrWriteLock(); assert hasReadOrWriteLock();
return datanodeMap.get(nodeID); return datanodeMap.get(nodeID);
} }
@ -5508,7 +5474,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
if (startBlockAfter != null) { if (startBlockAfter != null) {
startBlockId = Block.filename2id(startBlockAfter); startBlockId = Block.filename2id(startBlockAfter);
} }
BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator(); UnderReplicatedBlocks.BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
while (blkIterator.hasNext()) { while (blkIterator.hasNext()) {
Block blk = blkIterator.next(); Block blk = blkIterator.next();
INode inode = blockManager.getINode(blk); INode inode = blockManager.getINode(blk);
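
The metric getters above stop reading public counter fields off BlockManager and go through getter methods instead. An abbreviated sketch of that encapsulation; only two counters are shown, the update method is hypothetical, and the real BlockManager derives these values from its replication queues.

public class BlockManagerCountersSketch {
  // Abbreviated stand-in for the getters the metric code above now calls.
  private volatile long pendingReplicationBlocksCount;
  private volatile long underReplicatedBlocksCount;

  public long getPendingReplicationBlocksCount() {
    return pendingReplicationBlocksCount;
  }

  public long getUnderReplicatedBlocksCount() {
    return underReplicatedBlocksCount;
  }

  void updateCounts(long pending, long underReplicated) { // hypothetical setter
    pendingReplicationBlocksCount = pending;
    underReplicatedBlocksCount = underReplicated;
  }
}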

View File

@ -17,10 +17,13 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.util.*; import java.util.HashMap;
import java.util.Random;
import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
class Host2NodesMap { class Host2NodesMap {
private HashMap<String, DatanodeDescriptor[]> map private HashMap<String, DatanodeDescriptor[]> map
= new HashMap<String, DatanodeDescriptor[]>(); = new HashMap<String, DatanodeDescriptor[]>();

View File

@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
/** /**
@ -32,7 +34,7 @@ import org.apache.hadoop.util.StringUtils;
* This is a base INode class containing common fields for file and * This is a base INode class containing common fields for file and
* directory inodes. * directory inodes.
*/ */
abstract class INode implements Comparable<byte[]>, FSInodeInfo { public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
/* /*
* The inode name is in java UTF8 encoding; * The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this. * The name in HdfsFileStatus should keep the same encoding as this.
@ -324,7 +326,7 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
/** /**
* Is this inode being constructed? * Is this inode being constructed?
*/ */
boolean isUnderConstruction() { public boolean isUnderConstruction() {
return false; return false;
} }

View File

@ -24,8 +24,11 @@ import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
class INodeFile extends INode { /** I-node for closed file. */
public class INodeFile extends INode {
static final FsPermission UMASK = FsPermission.createImmutable((short)0111); static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
//Number of bits for Block size //Number of bits for Block size
@ -106,7 +109,7 @@ class INodeFile extends INode {
* Get file blocks * Get file blocks
* @return file blocks * @return file blocks
*/ */
BlockInfo[] getBlocks() { public BlockInfo[] getBlocks() {
return this.blocks; return this.blocks;
} }
@ -149,7 +152,7 @@ class INodeFile extends INode {
/** /**
* Set file block * Set file block
*/ */
void setBlock(int idx, BlockInfo blk) { public void setBlock(int idx, BlockInfo blk) {
this.blocks[idx] = blk; this.blocks[idx] = blk;
} }
@ -237,7 +240,7 @@ class INodeFile extends INode {
* Get the last block of the file. * Get the last block of the file.
* Make sure it has the right type. * Make sure it has the right type.
*/ */
<T extends BlockInfo> T getLastBlock() throws IOException { public <T extends BlockInfo> T getLastBlock() throws IOException {
if (blocks == null || blocks.length == 0) if (blocks == null || blocks.length == 0)
return null; return null;
T returnBlock = null; T returnBlock = null;
@ -252,7 +255,8 @@ class INodeFile extends INode {
return returnBlock; return returnBlock;
} }
int numBlocks() { /** @return the number of blocks */
public int numBlocks() {
return blocks == null ? 0 : blocks.length; return blocks == null ? 0 : blocks.length;
} }
} }
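
With getBlocks(), setBlock() and getLastBlock() now public, an INodeFile's block list can be traversed from the new blockmanagement package. A small sketch, assuming only the accessors shown above; computeFileLength is an invented helper, not part of the patch.

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

public class FileBlocksSketch {
  // Hypothetical helper: sum the bytes of a file's blocks through the
  // accessors made public above. getNumBytes() is inherited from Block.
  static long computeFileLength(INodeFile file) {
    BlockInfo[] blocks = file.getBlocks();
    if (blocks == null) {
      return 0;
    }
    long length = 0;
    for (BlockInfo blk : blocks) {
      length += blk.getNumBytes();
    }
    return length;
  }
}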

View File

@ -21,10 +21,15 @@ import java.io.IOException;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
/**
class INodeFileUnderConstruction extends INodeFile { * I-node for file being written.
*/
public class INodeFileUnderConstruction extends INodeFile {
private String clientName; // lease holder private String clientName; // lease holder
private final String clientMachine; private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too. private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@ -43,7 +48,7 @@ class INodeFileUnderConstruction extends INodeFile {
this.clientNode = clientNode; this.clientNode = clientNode;
} }
public INodeFileUnderConstruction(byte[] name, INodeFileUnderConstruction(byte[] name,
short blockReplication, short blockReplication,
long modificationTime, long modificationTime,
long preferredBlockSize, long preferredBlockSize,
@ -80,7 +85,7 @@ class INodeFileUnderConstruction extends INodeFile {
* Is this inode being constructed? * Is this inode being constructed?
*/ */
@Override @Override
boolean isUnderConstruction() { public boolean isUnderConstruction() {
return true; return true;
} }
@ -122,7 +127,7 @@ class INodeFileUnderConstruction extends INodeFile {
* Convert the last block of the file to an under-construction block. * Convert the last block of the file to an under-construction block.
* Set its locations. * Set its locations.
*/ */
BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeDescriptor[] targets) DatanodeDescriptor[] targets)
throws IOException { throws IOException {
if (blocks == null || blocks.length == 0) { if (blocks == null || blocks.length == 0) {

View File

@ -163,7 +163,7 @@ public class LeaseManager {
/** /**
* Finds the pathname for the specified pendingFile * Finds the pathname for the specified pendingFile
*/ */
synchronized String findPath(INodeFileUnderConstruction pendingFile) public synchronized String findPath(INodeFileUnderConstruction pendingFile)
throws IOException { throws IOException {
Lease lease = getLease(pendingFile.getClientName()); Lease lease = getLease(pendingFile.getClientName());
if (lease != null) { if (lease != null) {

View File

@ -247,7 +247,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
/** Return the {@link FSNamesystem} object. /** Return the {@link FSNamesystem} object.
* @return {@link FSNamesystem} object. * @return {@link FSNamesystem} object.
*/ */
FSNamesystem getNamesystem() { public FSNamesystem getNamesystem() {
return namesystem; return namesystem;
} }

View File

@ -36,20 +36,20 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
/** /**

View File

@ -36,24 +36,24 @@ import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.JspWriter; import javax.servlet.jsp.JspWriter;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction; import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.XMLOutputter;
import org.znerd.xmlenc.*;
class NamenodeJspHelper { class NamenodeJspHelper {
static String getSafeModeText(FSNamesystem fsn) { static String getSafeModeText(FSNamesystem fsn) {

View File

@ -17,15 +17,20 @@
*/ */
package org.apache.hadoop.hdfs.server.protocol; package org.apache.hadoop.hdfs.server.protocol;
import java.io.*; import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List; import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.io.*; import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
/**************************************************** /****************************************************

View File

@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

View File

@ -25,7 +25,7 @@ import junit.framework.TestCase;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
/** /**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the * This class tests DatanodeDescriptor.getBlocksScheduled() at the

View File

@ -15,17 +15,20 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException; import java.io.IOException;
import java.util.*; import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
/** /**
@ -79,7 +82,6 @@ public class TestCorruptReplicaInfo extends TestCase {
DatanodeDescriptor dn1 = new DatanodeDescriptor(); DatanodeDescriptor dn1 = new DatanodeDescriptor();
DatanodeDescriptor dn2 = new DatanodeDescriptor(); DatanodeDescriptor dn2 = new DatanodeDescriptor();
DatanodeDescriptor dn3 = new DatanodeDescriptor();
crm.addToCorruptReplicasMap(getBlock(0), dn1); crm.addToCorruptReplicasMap(getBlock(0), dn1);
assertEquals("Number of corrupt blocks not returning correctly", assertEquals("Number of corrupt blocks not returning correctly",

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList; import java.util.ArrayList;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import junit.framework.TestCase; import junit.framework.TestCase;
import java.lang.System; import java.lang.System;

View File

@ -15,8 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.blockmanagement;
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -24,18 +23,19 @@ import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import junit.framework.TestCase; import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
public class TestReplicationPolicy extends TestCase { public class TestReplicationPolicy extends TestCase {
private static final int BLOCK_SIZE = 1024; private static final int BLOCK_SIZE = 1024;

View File

@ -15,7 +15,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -25,8 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import junit.framework.TestCase;
public class TestUnderReplicatedBlocks extends TestCase { public class TestUnderReplicatedBlocks extends TestCase {
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception { public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {

View File

@ -17,30 +17,31 @@
*/ */
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File; import java.io.File;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger; import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
/** /**
* Test reporting of DN volume failure counts and metrics. * Test reporting of DN volume failure counts and metrics.

View File

@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage;

View File

@@ -19,10 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.ipc.Server;
/** /**
* This is a utility class to expose NameNode functionality for unit tests. * This is a utility class to expose NameNode functionality for unit tests.

View File

@@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
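The two added imports above pull BlockInfo and BlockInfoUnderConstruction from the relocated package. A minimal sketch of how a caller resolves these types after the move (the sketch class and field names are hypothetical; only the package paths are taken from this diff):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;

class BlockStateSketch {
  BlockInfo storedBlock;                   // block metadata, now under blockmanagement
  BlockInfoUnderConstruction pendingBlock; // under-construction variant, same package
  BlockUCState state;                      // the block state enum stays in server.common
}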

View File

@@ -17,14 +17,15 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import junit.framework.TestCase;
/** /**
* Test if FSNamesystem handles heartbeat right * Test if FSNamesystem handles heartbeat right
*/ */

View File

@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

View File

@@ -17,19 +17,15 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.Random; import java.util.Random;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@@ -40,8 +36,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/** /**
* This class tests the decommissioning of nodes. * This class tests the decommissioning of nodes.

View File

@@ -18,10 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
public class TestHost2NodesMap extends TestCase { public class TestHost2NodesMap extends TestCase {
static private Host2NodesMap map = new Host2NodesMap(); static private Host2NodesMap map = new Host2NodesMap();
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {

View File

@@ -21,17 +21,17 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File; import java.io.File;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

View File

@@ -21,6 +21,8 @@ import java.util.Collection;
import java.util.Iterator; import java.util.Iterator;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@@ -30,9 +32,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import junit.framework.TestCase;
/** /**
* Test if live nodes count per node is correct * Test if live nodes count per node is correct
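The changed import in this hunk also reflects that NumberReplicas, previously referenced through FSNamesystem as FSNamesystem.NumberReplicas, is now a top-level class in the blockmanagement package. A hedged sketch of the reference before and after (only the package and class names come from the diff; no constructors or methods are assumed):

// before this commit: org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
// after this commit:
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;

class ReplicaCountSketch {
  NumberReplicas counts; // replica tally type, now top-level in blockmanagement
}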

View File

@@ -20,20 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import junit.framework.TestCase;
public class TestOverReplicatedBlocks extends TestCase { public class TestOverReplicatedBlocks extends TestCase {
/** Test processOverReplicatedBlock can handle corrupt replicas fine. /** Test processOverReplicatedBlock can handle corrupt replicas fine.

View File

@@ -17,34 +17,35 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode.metrics; package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.io.DataInputStream; import java.io.DataInputStream;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger; import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.apache.commons.logging.LogFactory;
import static org.apache.hadoop.test.MetricsAsserts.*;
/** /**
* Test for metrics published by the Namenode * Test for metrics published by the Namenode
*/ */
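Taken together, these hunks show the package split the test updates follow: BlockManager and DatanodeDescriptor now resolve from blockmanagement, while FSNamesystem and NameNodeAdapter stay under namenode. A minimal compilable sketch of that layout (the sketch class and field names are hypothetical; only the import paths come from the diff):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

class PackageLayoutSketch {
  FSNamesystem namesystem;     // still in org.apache.hadoop.hdfs.server.namenode
  BlockManager blockManager;   // moved to org.apache.hadoop.hdfs.server.blockmanagement
  DatanodeDescriptor datanode; // moved to org.apache.hadoop.hdfs.server.blockmanagement
}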

View File

@@ -24,8 +24,8 @@ import java.util.Map;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
public class TestNetworkTopology extends TestCase { public class TestNetworkTopology extends TestCase {
private final static NetworkTopology cluster = new NetworkTopology(); private final static NetworkTopology cluster = new NetworkTopology();

View File

@@ -19,6 +19,16 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static junit.framework.Assert.assertTrue; import static junit.framework.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger; import org.apache.commons.logging.impl.Log4JLogger;
@@ -34,20 +44,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.After; import org.junit.After;
import static org.junit.Assert.*;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
public class TestNNLeaseRecovery { public class TestNNLeaseRecovery {
private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class); private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);

View File

@@ -59,19 +59,9 @@
%> %>
<%@ page <%@ page
contentType="application/xml" contentType="application/xml"
import="java.io.IOException"
import="java.util.Iterator"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hdfs.protocol.Block"
import="org.apache.hadoop.hdfs.server.namenode.INode"
import="org.apache.hadoop.hdfs.server.namenode.BlocksMap"
import="org.apache.hadoop.hdfs.server.namenode.BlockInfo"
import="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo" import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo"
import="org.apache.hadoop.hdfs.server.common.JspHelper" import="org.apache.hadoop.hdfs.server.common.JspHelper"
import="org.apache.hadoop.util.ServletUtil"
import="org.znerd.xmlenc.*" import="org.znerd.xmlenc.*"
%> %>
<%! <%!
//for java.io.Serializable //for java.io.Serializable