HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and BlocksMap to {get|set|add}BlockCollection(..). Contributed by John George

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1336909 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-05-10 21:41:25 +00:00
parent 6957c78066
commit 7e8e983620
15 changed files with 134 additions and 131 deletions


@@ -441,6 +441,9 @@ Release 2.0.0 - UNRELEASED
HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
final releases. (todd)
+ HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
+ BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo)
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)


@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
@InterfaceAudience.Private
public class BlockInfo extends Block implements
LightWeightGSet.LinkedElement {
- private BlockCollection inode;
+ private BlockCollection bc;
/** For implementing {@link LightWeightGSet.LinkedElement} interface */
private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -57,13 +57,13 @@ public class BlockInfo extends Block implements
*/
public BlockInfo(int replication) {
this.triplets = new Object[3*replication];
- this.inode = null;
+ this.bc = null;
}
public BlockInfo(Block blk, int replication) {
super(blk);
this.triplets = new Object[3*replication];
- this.inode = null;
+ this.bc = null;
}
/**
@@ -72,16 +72,16 @@ public class BlockInfo extends Block implements
* @param from BlockInfo to copy from.
*/
protected BlockInfo(BlockInfo from) {
- this(from, from.inode.getReplication());
- this.inode = from.inode;
+ this(from, from.bc.getReplication());
+ this.bc = from.bc;
}
- public BlockCollection getINode() {
- return inode;
+ public BlockCollection getBlockCollection() {
+ return bc;
}
- public void setINode(BlockCollection inode) {
- this.inode = inode;
+ public void setBlockCollection(BlockCollection bc) {
+ this.bc = bc;
}
DatanodeDescriptor getDatanode(int index) {
@@ -334,7 +334,7 @@ public class BlockInfo extends Block implements
BlockUCState s, DatanodeDescriptor[] targets) {
if(isComplete()) {
return new BlockInfoUnderConstruction(
- this, getINode().getReplication(), s, targets);
+ this, getBlockCollection().getReplication(), s, targets);
}
// the block is already under construction
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;

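Before the next file, a minimal standalone sketch of what the renamed back-reference looks like after this change. The types below are simplified stand-ins for readers who want the shape of the API without the full diff; they are illustrative only, not the real Hadoop classes.

interface BlockCollection {
  // the two accessors the diff above relies on
  short getReplication();
  String getName();
}

class SimpleBlockInfo {
  // was "private BlockCollection inode;" before HDFS-3369
  private BlockCollection bc;

  // was getINode()
  public BlockCollection getBlockCollection() {
    return bc;
  }

  // was setINode(..); passing null detaches the block from its file
  public void setBlockCollection(BlockCollection bc) {
    this.bc = bc;
  }
}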

@@ -140,7 +140,7 @@ public class BlockManager {
private final long replicationRecheckInterval;
/**
- * Mapping: Block -> { INode, datanodes, self ref }
+ * Mapping: Block -> { BlockCollection, datanodes, self ref }
* Updated only in response to client-sent information.
*/
final BlocksMap blocksMap;
@@ -190,7 +190,7 @@ public class BlockManager {
public final short minReplication;
/** Default number of replicas */
public final int defaultReplication;
- /** The maximum number of entries returned by getCorruptInodes() */
+ /** value returned by MAX_CORRUPT_FILES_RETURNED */
final int maxCorruptFilesReturned;
/** variable to enable check for enough racks */
@@ -382,7 +382,7 @@ public class BlockManager {
numReplicas.decommissionedReplicas();
if (block instanceof BlockInfo) {
- String fileName = ((BlockInfo)block).getINode().getName();
+ String fileName = ((BlockInfo)block).getBlockCollection().getName();
out.print(fileName + ": ");
}
// l: == live:, d: == decommissioned c: == corrupt e: == excess
@@ -452,17 +452,17 @@ public class BlockManager {
* Commit the last block of the file and mark it as complete if it
* meets the minimum replication requirement
*
- * @param fileINode file inode
+ * @param bc block collection
* @param commitBlock - contains client reported block length and generation
* @return true if the last block is changed to committed state.
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode,
+ public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
Block commitBlock) throws IOException {
if(commitBlock == null)
return false; // not committing, this is a block allocation retry
- BlockInfo lastBlock = fileINode.getLastBlock();
+ BlockInfo lastBlock = bc.getLastBlock();
if(lastBlock == null)
return false; // no blocks in file yet
if(lastBlock.isComplete())
@@ -470,22 +470,22 @@ public class BlockManager {
final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
if(countNodes(lastBlock).liveReplicas() >= minReplication)
- completeBlock(fileINode, fileINode.numBlocks()-1, false);
+ completeBlock(bc, bc.numBlocks()-1, false);
return b;
}
/**
* Convert a specified block of the file to a complete block.
- * @param fileINode file
+ * @param bc file
* @param blkIndex block index in the file
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private BlockInfo completeBlock(final MutableBlockCollection fileINode,
+ private BlockInfo completeBlock(final MutableBlockCollection bc,
final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0)
return null;
- BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
+ BlockInfo curBlock = bc.getBlocks()[blkIndex];
if(curBlock.isComplete())
return curBlock;
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
@@ -498,7 +498,7 @@ public class BlockManager {
"Cannot complete block: block has not been COMMITTED by the client");
BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
// replace penultimate block in file
- fileINode.setBlock(blkIndex, completeBlock);
+ bc.setBlock(blkIndex, completeBlock);
// Since safe-mode only counts complete blocks, and we now have
// one more complete block, we need to adjust the total up, and
@@ -514,12 +514,12 @@ public class BlockManager {
return blocksMap.replaceBlock(completeBlock);
}
- private BlockInfo completeBlock(final MutableBlockCollection fileINode,
+ private BlockInfo completeBlock(final MutableBlockCollection bc,
final BlockInfo block, boolean force) throws IOException {
- BlockInfo[] fileBlocks = fileINode.getBlocks();
+ BlockInfo[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++)
if(fileBlocks[idx] == block) {
- return completeBlock(fileINode, idx, force);
+ return completeBlock(bc, idx, force);
}
return block;
}
@@ -529,10 +529,10 @@ public class BlockManager {
* regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby.
*/
- public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode,
+ public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block);
- return completeBlock(fileINode, block, true);
+ return completeBlock(bc, block, true);
}
@@ -546,14 +546,14 @@ public class BlockManager {
* The methods returns null if there is no partial block at the end.
* The client is supposed to allocate a new block with the next call.
*
- * @param fileINode file
+ * @param bc file
* @return the last block locations if the block is partial or null otherwise
*/
public LocatedBlock convertLastBlockToUnderConstruction(
- MutableBlockCollection fileINode) throws IOException {
- BlockInfo oldBlock = fileINode.getLastBlock();
+ MutableBlockCollection bc) throws IOException {
+ BlockInfo oldBlock = bc.getLastBlock();
if(oldBlock == null ||
- fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
+ bc.getPreferredBlockSize() == oldBlock.getNumBytes())
return null;
assert oldBlock == getStoredBlock(oldBlock) :
"last block of the file is not in blocksMap";
@@ -561,7 +561,7 @@ public class BlockManager {
DatanodeDescriptor[] targets = getNodes(oldBlock);
BlockInfoUnderConstruction ucBlock =
- fileINode.setLastBlock(oldBlock, targets);
+ bc.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock);
// Remove block from replication queue.
@@ -581,7 +581,7 @@ public class BlockManager {
// always decrement total blocks
-1);
- final long fileLength = fileINode.computeContentSummary().getLength();
+ final long fileLength = bc.computeContentSummary().getLength();
final long pos = fileLength - ucBlock.getNumBytes();
return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
}
@@ -921,8 +921,8 @@ public class BlockManager {
" does not exist. ");
}
- BlockCollection inode = storedBlock.getINode();
- if (inode == null) {
+ BlockCollection bc = storedBlock.getBlockCollection();
+ if (bc == null) {
NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
"block " + storedBlock +
" could not be marked as corrupt as it" +
@@ -936,7 +936,7 @@ public class BlockManager {
// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
- if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
+ if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) {
@@ -1049,7 +1049,7 @@ public class BlockManager {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
DatanodeDescriptor srcNode;
- BlockCollection fileINode = null;
+ BlockCollection bc = null;
int additionalReplRequired;
int scheduledWork = 0;
@@ -1061,15 +1061,15 @@ public class BlockManager {
for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
for (Block block : blocksToReplicate.get(priority)) {
// block should belong to a file
- fileINode = blocksMap.getINode(block);
+ bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(fileINode == null || fileINode instanceof MutableBlockCollection) {
+ if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
continue;
}
- requiredReplication = fileINode.getReplication();
+ requiredReplication = bc.getReplication();
// get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1105,7 +1105,7 @@ public class BlockManager {
} else {
additionalReplRequired = 1; // Needed on a new rack
}
- work.add(new ReplicationWork(block, fileINode, srcNode,
+ work.add(new ReplicationWork(block, bc, srcNode,
containingNodes, liveReplicaNodes, additionalReplRequired,
priority));
}
@@ -1127,8 +1127,8 @@ public class BlockManager {
// choose replication targets: NOT HOLDING THE GLOBAL LOCK
// It is costly to extract the filename for which chooseTargets is called,
- // so for now we pass in the Inode itself.
- rw.targets = blockplacement.chooseTarget(rw.fileINode,
+ // so for now we pass in the block collection itself.
+ rw.targets = blockplacement.chooseTarget(rw.bc,
rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
excludedNodes, rw.block.getNumBytes());
}
@@ -1147,15 +1147,15 @@ public class BlockManager {
int priority = rw.priority;
// Recheck since global lock was released
// block should belong to a file
- fileINode = blocksMap.getINode(block);
+ bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(fileINode == null || fileINode instanceof MutableBlockCollection) {
+ if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications
rw.targets = null;
neededReplications.decrementReplicationIndex(priority);
continue;
}
- requiredReplication = fileINode.getReplication();
+ requiredReplication = bc.getReplication();
// do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block);
@@ -1914,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& numCurrentReplica >= minReplication) {
- completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false);
+ completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that.
@@ -1942,7 +1942,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
} else {
storedBlock = block;
}
- if (storedBlock == null || storedBlock.getINode() == null) {
+ if (storedBlock == null || storedBlock.getBlockCollection() == null) {
// If this block does not belong to any file, then we are done.
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+ node + " size " + block.getNumBytes()
@@ -1952,8 +1952,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return block;
}
assert storedBlock != null : "Block must be stored by now";
- BlockCollection fileINode = storedBlock.getINode();
- assert fileINode != null : "Block must belong to a file";
+ BlockCollection bc = storedBlock.getBlockCollection();
+ assert bc != null : "Block must belong to a file";
// add block to the datanode
boolean added = node.addBlock(storedBlock);
@@ -1979,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
numLiveReplicas >= minReplication) {
- storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false);
+ storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that
@@ -1990,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
}
// if file is under construction, then done for now
- if (fileINode instanceof MutableBlockCollection) {
+ if (bc instanceof MutableBlockCollection) {
return storedBlock;
}
@@ -2000,7 +2000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
}
// handle underReplication/overReplication
- short fileReplication = fileINode.getReplication();
+ short fileReplication = bc.getReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas(), fileReplication);
@@ -2127,8 +2127,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
* what happened with it.
*/
private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
- BlockCollection fileINode = block.getINode();
- if (fileINode == null) {
+ BlockCollection bc = block.getBlockCollection();
+ if (bc == null) {
// block does not belong to any file
addToInvalidates(block);
return MisReplicationResult.INVALID;
@@ -2139,7 +2139,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return MisReplicationResult.UNDER_CONSTRUCTION;
}
// calculate current replication
- short expectedReplication = fileINode.getReplication();
+ short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
@@ -2256,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
BlockPlacementPolicy replicator) {
assert namesystem.hasWriteLock();
// first form a rack to datanodes map and
- BlockCollection inode = getINode(b);
+ BlockCollection bc = getBlockCollection(b);
final Map<String, List<DatanodeDescriptor>> rackMap
= new HashMap<String, List<DatanodeDescriptor>>();
for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@@ -2296,7 +2296,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|| (addedNode != null && !priSet.contains(addedNode))) ) {
cur = delNodeHint;
} else { // regular excessive replica removal
- cur = replicator.chooseReplicaToDelete(inode, b, replication,
+ cur = replicator.chooseReplicaToDelete(bc, b, replication,
priSet, remains);
}
firstOne = false;
@@ -2377,8 +2377,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
// necessary. In that case, put block on a possibly-will-
// be-replicated list.
//
- BlockCollection fileINode = blocksMap.getINode(block);
- if (fileINode != null) {
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ if (bc != null) {
namesystem.decrementSafeBlockCount(block);
updateNeededReplications(block, -1, 0);
}
@@ -2609,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
NumberReplicas num) {
int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block);
- BlockCollection fileINode = blocksMap.getINode(block);
+ BlockCollection bc = blocksMap.getBlockCollection(block);
Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
StringBuilder nodeList = new StringBuilder();
while (nodeIter.hasNext()) {
@@ -2622,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
+ ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissionedReplicas()
+ ", excess replicas: " + num.excessReplicas()
- + ", Is Open File: " + (fileINode instanceof MutableBlockCollection)
+ + ", Is Open File: " + (bc instanceof MutableBlockCollection)
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress());
@@ -2637,8 +2637,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) {
final Block block = it.next();
- BlockCollection fileINode = blocksMap.getINode(block);
- short expectedReplication = fileINode.getReplication();
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) {
@@ -2660,9 +2660,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) {
final Block block = it.next();
- BlockCollection fileINode = blocksMap.getINode(block);
+ BlockCollection bc = blocksMap.getBlockCollection(block);
- if (fileINode != null) {
+ if (bc != null) {
NumberReplicas num = countNodes(block);
int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block);
@@ -2677,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
decommissionOnlyReplicas++;
}
- if (fileINode instanceof MutableBlockCollection) {
+ if (bc instanceof MutableBlockCollection) {
underReplicatedInOpenFiles++;
}
}
@@ -2780,11 +2780,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
/* get replication factor of a block */
private int getReplication(Block block) {
- BlockCollection fileINode = blocksMap.getINode(block);
- if (fileINode == null) { // block does not belong to any file
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ if (bc == null) { // block does not belong to any file
return 0;
}
- return fileINode.getReplication();
+ return bc.getReplication();
}
@@ -2856,12 +2856,12 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return this.neededReplications.getCorruptBlockSize();
}
- public BlockInfo addINode(BlockInfo block, BlockCollection iNode) {
- return blocksMap.addINode(block, iNode);
+ public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
+ return blocksMap.addBlockCollection(block, bc);
}
- public BlockCollection getINode(Block b) {
- return blocksMap.getINode(b);
+ public BlockCollection getBlockCollection(Block b) {
+ return blocksMap.getBlockCollection(b);
}
/** @return an iterator of the datanodes. */
@@ -3000,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
private static class ReplicationWork {
private Block block;
- private BlockCollection fileINode;
+ private BlockCollection bc;
private DatanodeDescriptor srcNode;
private List<DatanodeDescriptor> containingNodes;
@@ -3011,14 +3011,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
private int priority;
public ReplicationWork(Block block,
- BlockCollection fileINode,
+ BlockCollection bc,
DatanodeDescriptor srcNode,
List<DatanodeDescriptor> containingNodes,
List<DatanodeDescriptor> liveReplicaNodes,
int additionalReplRequired,
int priority) {
this.block = block;
- this.fileINode = fileINode;
+ this.bc = bc;
this.srcNode = srcNode;
this.containingNodes = containingNodes;
this.liveReplicaNodes = liveReplicaNodes;


@@ -110,11 +110,11 @@ public abstract class BlockPlacementPolicy {
* choose <i>numOfReplicas</i> data nodes for <i>writer</i>
* If not, return as many as we can.
* The base implementation extracts the pathname of the file from the
- * specified srcInode, but this could be a costly operation depending on the
+ * specified srcBC, but this could be a costly operation depending on the
* file system implementation. Concrete implementations of this class should
* override this method to avoid this overhead.
*
- * @param srcInode The inode of the file for which chooseTarget is being invoked.
+ * @param srcBC block collection of file for which chooseTarget is invoked.
* @param numOfReplicas additional number of replicas wanted.
* @param writer the writer's machine, null if not in the cluster.
* @param chosenNodes datanodes that have been chosen as targets.
@@ -122,13 +122,13 @@ public abstract class BlockPlacementPolicy {
* @return array of DatanodeDescriptor instances chosen as target
* and sorted as a pipeline.
*/
- DatanodeDescriptor[] chooseTarget(BlockCollection srcInode,
+ DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
HashMap<Node, Node> excludedNodes,
long blocksize) {
- return chooseTarget(srcInode.getName(), numOfReplicas, writer,
+ return chooseTarget(srcBC.getName(), numOfReplicas, writer,
chosenNodes, excludedNodes, blocksize);
}
@@ -149,7 +149,7 @@ public abstract class BlockPlacementPolicy {
* Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy.
*
- * @param srcInode The inode of the file to which the block-to-be-deleted belongs
+ * @param srcBC block collection of file to which block-to-be-deleted belongs
* @param block The block to be deleted
* @param replicationFactor The required number of replicas for this block
* @param existingReplicas The replica locations of this block that are present
@@ -158,7 +158,7 @@ public abstract class BlockPlacementPolicy {
listed in the previous parameter.
* @return the replica that is the best candidate for deletion
*/
- abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode,
+ abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> existingReplicas,


@@ -546,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> first,


@@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
/**
* This class maintains the map from a block to its metadata.
- * block's metadata currently includes INode it belongs to and
+ * block's metadata currently includes blockCollection it belongs to and
* the datanodes that store the block.
*/
class BlocksMap {
@@ -92,21 +92,21 @@ class BlocksMap {
blocks = null;
}
- BlockCollection getINode(Block b) {
+ BlockCollection getBlockCollection(Block b) {
BlockInfo info = blocks.get(b);
- return (info != null) ? info.getINode() : null;
+ return (info != null) ? info.getBlockCollection() : null;
}
/**
- * Add block b belonging to the specified file inode to the map.
+ * Add block b belonging to the specified block collection to the map.
*/
- BlockInfo addINode(BlockInfo b, BlockCollection iNode) {
+ BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
BlockInfo info = blocks.get(b);
if (info != b) {
info = b;
blocks.put(info);
}
- info.setINode(iNode);
+ info.setBlockCollection(bc);
return info;
}
@@ -120,7 +120,7 @@ class BlocksMap {
if (blockInfo == null)
return;
- blockInfo.setINode(null);
+ blockInfo.setBlockCollection(null);
for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx);
dn.removeBlock(blockInfo); // remove from the list and wipe the location
@@ -168,7 +168,7 @@ class BlocksMap {
boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left
- && info.getINode() == null) { // does not belong to a file
+ && info.getBlockCollection() == null) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
return removed;

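As a reading aid, a hedged sketch of the addBlockCollection(..)/getBlockCollection(..) contract shown above, with a plain HashMap standing in for Hadoop's LightWeightGSet and reusing the simplified types from the earlier sketch; it is illustrative only, not the real implementation:

import java.util.HashMap;
import java.util.Map;

class SimpleBlocksMap {
  private final Map<Long, SimpleBlockInfo> blocks = new HashMap<>();

  // Mirrors addBlockCollection: store the block if the map does not already
  // hold this exact object, then attach the owning collection and return
  // the canonical instance kept in the map.
  SimpleBlockInfo addBlockCollection(long blockId, SimpleBlockInfo b, BlockCollection bc) {
    SimpleBlockInfo info = blocks.get(blockId);
    if (info != b) {
      info = b;
      blocks.put(blockId, info);
    }
    info.setBlockCollection(bc);
    return info;
  }

  // Mirrors getBlockCollection: null when the block is unknown or orphaned.
  BlockCollection getBlockCollection(long blockId) {
    SimpleBlockInfo info = blocks.get(blockId);
    return (info != null) ? info.getBlockCollection() : null;
  }
}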

@@ -309,7 +309,7 @@ public class FSDirectory implements Closeable {
INodeFile newF = (INodeFile)newNode;
BlockInfo[] blocks = newF.getBlocks();
for (int i = 0; i < blocks.length; i++) {
- newF.setBlock(i, getBlockManager().addINode(blocks[i], newF));
+ newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF));
}
}
} finally {
@@ -346,7 +346,7 @@ public class FSDirectory implements Closeable {
fileINode.getReplication(),
BlockUCState.UNDER_CONSTRUCTION,
targets);
- getBlockManager().addINode(blockInfo, fileINode);
+ getBlockManager().addBlockCollection(blockInfo, fileINode);
fileINode.addBlock(blockInfo);
if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1127,7 +1127,7 @@ public class FSDirectory implements Closeable {
int index = 0;
for (BlockInfo b : newnode.getBlocks()) {
- BlockInfo info = getBlockManager().addINode(b, newnode);
+ BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
newnode.setBlock(index, info); // inode refers to the block in BlocksMap
index++;
}


@@ -601,7 +601,7 @@ public class FSEditLogLoader {
// OP_ADD operations as each block is allocated.
newBI = new BlockInfo(newBlock, file.getReplication());
}
- fsNamesys.getBlockManager().addINode(newBI, file);
+ fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}


@@ -2840,7 +2840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (storedBlock == null) {
throw new IOException("Block (=" + lastblock + ") not found");
}
- INodeFile iFile = (INodeFile) storedBlock.getINode();
+ INodeFile iFile = (INodeFile) storedBlock.getBlockCollection();
if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
throw new IOException("Unexpected block (=" + lastblock
+ ") since the file (=" + iFile.getLocalName()
@@ -4135,7 +4135,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Returns whether the given block is one pointed-to by a file.
*/
private boolean isValidBlock(Block b) {
- return (blockManager.getINode(b) != null);
+ return (blockManager.getBlockCollection(b) != null);
}
// Distributed upgrade manager
@@ -4394,7 +4394,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
// check file inode
- INodeFile file = (INodeFile) storedBlock.getINode();
+ INodeFile file = (INodeFile) storedBlock.getBlockCollection();
if (file==null || !file.isUnderConstruction()) {
throw new IOException("The file " + storedBlock +
" belonged to does not exist or it is not under construction.");
@@ -4706,7 +4706,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
while (blkIterator.hasNext()) {
Block blk = blkIterator.next();
- INode inode = (INodeFile) blockManager.getINode(blk);
+ INode inode = (INodeFile) blockManager.getBlockCollection(blk);
skip++;
if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
String src = FSDirectory.getFullPathName(inode);


@@ -131,7 +131,7 @@ public class INodeFile extends INode implements BlockCollection {
}
for(BlockInfo bi: newlist) {
- bi.setINode(this);
+ bi.setBlockCollection(this);
}
this.blocks = newlist;
}
@@ -164,7 +164,7 @@ public class INodeFile extends INode implements BlockCollection {
if(blocks != null && v != null) {
for (BlockInfo blk : blocks) {
v.add(blk);
- blk.setINode(null);
+ blk.setBlockCollection(null);
}
}
blocks = null;


@@ -156,7 +156,7 @@ public class INodeFileUnderConstruction extends INodeFile
BlockInfoUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction(
BlockUCState.UNDER_CONSTRUCTION, targets);
- ucBlock.setINode(this);
+ ucBlock.setBlockCollection(this);
setBlock(numBlocks()-1, ucBlock);
return ucBlock;
}

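The INodeFile and INodeFileUnderConstruction hunks above follow one pattern: whenever a file installs or replaces a BlockInfo, it re-points the block's owner with setBlockCollection(this), or clears it with null on delete. A hypothetical file class (again reusing the simplified sketch types, not Hadoop's INodeFile) makes the hand-off explicit:

class SimpleFile implements BlockCollection {
  private final String name;
  private final short replication;
  private final SimpleBlockInfo[] blocks;

  SimpleFile(String name, short replication, SimpleBlockInfo[] blocks) {
    this.name = name;
    this.replication = replication;
    this.blocks = blocks;
    for (SimpleBlockInfo b : blocks) {
      b.setBlockCollection(this); // each block points back at its owning file
    }
  }

  public short getReplication() { return replication; }
  public String getName() { return name; }
}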

@@ -734,7 +734,7 @@ class NamenodeJspHelper {
this.inode = null;
} else {
this.block = new Block(blockId);
- this.inode = (INodeFile) blockManager.getINode(block);
+ this.inode = (INodeFile) blockManager.getBlockCollection(block);
}
}


@@ -384,7 +384,7 @@ public class TestBlockManager {
Mockito.doReturn((short)3).when(iNode).getReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
- bm.blocksMap.addINode(blockInfo, iNode);
+ bm.blocksMap.addBlockCollection(blockInfo, iNode);
return blockInfo;
}


@@ -144,25 +144,25 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
/** {@inheritDoc} */
@Override
- public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block, short replicationFactor,
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
DatanodeDescriptor chosenNode = null;
try {
- String path = cachedFullPathNames.get(inode);
+ String path = cachedFullPathNames.get(bc);
FileType type = getFileType(path);
if (type == FileType.NOT_RAID) {
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
List<LocatedBlock> companionBlocks =
getCompanionBlocks(path, type, block);
if (companionBlocks == null || companionBlocks.size() == 0) {
// Use the default method if it is not a valid raided or parity file
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
// Delete from the first collection first
// This ensures the number of unique rack of this block is not reduced
@@ -174,12 +174,12 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
return chosenNode;
}
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
} catch (Exception e) {
LOG.debug("Error happened when choosing replica to delete" +
StringUtils.stringifyException(e));
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
}
@@ -446,25 +446,25 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
};
static private class INodeWithHashCode {
- BlockCollection inode;
- INodeWithHashCode(BlockCollection inode) {
- this.inode = inode;
+ BlockCollection bc;
+ INodeWithHashCode(BlockCollection bc) {
+ this.bc = bc;
}
@Override
public boolean equals(Object obj) {
- return inode == obj;
+ return bc == obj;
}
@Override
public int hashCode() {
- return System.identityHashCode(inode);
+ return System.identityHashCode(bc);
}
String getFullPathName() {
- return inode.getName();
+ return bc.getName();
}
}
- public String get(BlockCollection inode) throws IOException {
- return cacheInternal.get(new INodeWithHashCode(inode));
+ public String get(BlockCollection bc) throws IOException {
+ return cacheInternal.get(new INodeWithHashCode(bc));
}
}


@@ -240,19 +240,19 @@ public class TestBlockPlacementPolicyRaid {
// test full path cache
CachedFullPathNames cachedFullPathNames =
new CachedFullPathNames(namesystem);
- final BlockCollection[] inodes = NameNodeRaidTestUtil.getBlockCollections(
+ final BlockCollection[] bcs = NameNodeRaidTestUtil.getBlockCollections(
namesystem, file1, file2);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
try {
Thread.sleep(1200L);
} catch (InterruptedException e) {
}
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -476,14 +476,14 @@ public class TestBlockPlacementPolicyRaid {
}
private void verifyCachedFullPathNameResult(
- CachedFullPathNames cachedFullPathNames, BlockCollection inode)
+ CachedFullPathNames cachedFullPathNames, BlockCollection bc)
throws IOException {
- String res1 = inode.getName();
- String res2 = cachedFullPathNames.get(inode);
+ String res1 = bc.getName();
+ String res2 = cachedFullPathNames.get(bc);
LOG.info("Actual path name: " + res1);
LOG.info("Cached path name: " + res2);
- Assert.assertEquals(cachedFullPathNames.get(inode),
- inode.getName());
+ Assert.assertEquals(cachedFullPathNames.get(bc),
+ bc.getName());
}
private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks,
@@ -502,7 +502,7 @@ public class TestBlockPlacementPolicyRaid {
private Collection<LocatedBlock> getCompanionBlocks(
FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
ExtendedBlock block) throws IOException {
- INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block
+ INodeFile inode = (INodeFile)blockManager.blocksMap.getBlockCollection(block
.getLocalBlock());
FileType type = policy.getFileType(inode.getFullPathName());
return policy.getCompanionBlocks(inode.getFullPathName(), type,