svn merge -c 1335304 from trunk for HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces so that INodeFile and INodeFileUnderConstruction do not have to be used in block management.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1335305 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-05-08 00:08:12 +00:00
parent adeb718f2b
commit d8156648fe
16 changed files with 172 additions and 93 deletions

View File: CHANGES.txt

@@ -289,6 +289,10 @@ Release 2.0.0 - UNRELEASED
 HDFS-3375. Put client name in DataXceiver thread name for readBlock
 and keepalive (todd)
+HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces
+so that INodeFile and INodeFileUnderConstruction do not have to be used in
+block management. (John George via szetszwo)
 OPTIMIZATIONS
 HDFS-2477. Optimize computing the diff between a block report and the

View File: BlockCollection.java (new file)

@@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.fs.ContentSummary;
/**
* This interface is used by the block manager to expose a
* few characteristics of a collection of Block/BlockUnderConstruction.
*/
public interface BlockCollection {
/**
* Get the last block of the collection.
* Make sure it has the right type.
*/
public <T extends BlockInfo> T getLastBlock() throws IOException;
/**
* Get content summary.
*/
public ContentSummary computeContentSummary();
/** @return the number of blocks */
public int numBlocks();
public BlockInfo[] getBlocks();
/**
* Get preferred block size for the collection
* @return preferred block size in bytes
*/
public long getPreferredBlockSize();
/**
* Get block replication for the collection
* @return block replication value
*/
public short getReplication();
/**
* Get name of collection.
*/
public String getName();
}
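
A hedged sketch, not part of this patch: block-management code can now work entirely against this interface, with no reference to INodeFile. printBlockOwner is a hypothetical helper; getINode() is the accessor on BlockInfo, changed below to return BlockCollection.

  // Hypothetical helper: everything it touches goes through BlockCollection.
  static void printBlockOwner(BlockInfo storedBlock) {
    BlockCollection bc = storedBlock.getINode();
    if (bc == null) {
      System.out.println(storedBlock + " does not belong to any collection");
      return;
    }
    // getName() stands in for the old INodeFile.getFullPathName() calls.
    System.out.println(storedBlock + " belongs to " + bc.getName()
        + " (replication=" + bc.getReplication()
        + ", preferredBlockSize=" + bc.getPreferredBlockSize() + " bytes)");
  }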

View File: BlockInfo.java

@@ -19,14 +19,16 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 /**
  * Internal class for block metadata.
+ * BlockInfo class maintains for a given block
+ * the {@link BlockCollection} it is part of and datanodes where the replicas of
+ * the block are stored.
  */
 public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
-  private INodeFile inode;
+  private BlockCollection inode;
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
   private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -66,11 +68,11 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
     this.inode = from.inode;
   }
-  public INodeFile getINode() {
+  public BlockCollection getINode() {
     return inode;
   }
-  public void setINode(INodeFile inode) {
+  public void setINode(BlockCollection inode) {
     this.inode = inode;
   }

View File: BlockInfoUnderConstruction.java

@@ -234,7 +234,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
       NameNode.stateChangeLog.warn("BLOCK*"
-          + " INodeFileUnderConstruction.initLeaseRecovery:"
+          + " BlockInfoUnderConstruction.initLeaseRecovery:"
          + " No blocks found, lease removed.");
     }

View File: BlockManager.java

@@ -57,8 +57,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -386,7 +384,7 @@ public class BlockManager {
       numReplicas.decommissionedReplicas();
     if (block instanceof BlockInfo) {
-      String fileName = ((BlockInfo)block).getINode().getFullPathName();
+      String fileName = ((BlockInfo)block).getINode().getName();
       out.print(fileName + ": ");
     }
     // l: == live:, d: == decommissioned c: == corrupt e: == excess
@@ -462,7 +460,7 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
+  public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode,
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
@@ -474,7 +472,7 @@
     final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
     if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(fileINode,fileINode.numBlocks()-1, false);
+      completeBlock(fileINode, fileINode.numBlocks()-1, false);
     return b;
   }
@@ -485,7 +483,7 @@
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfo completeBlock(final INodeFile fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
       final int blkIndex, boolean force) throws IOException {
     if(blkIndex < 0)
       return null;
@@ -518,7 +516,7 @@
     return blocksMap.replaceBlock(completeBlock);
   }
-  private BlockInfo completeBlock(final INodeFile fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
       final BlockInfo block, boolean force) throws IOException {
     BlockInfo[] fileBlocks = fileINode.getBlocks();
     for(int idx = 0; idx < fileBlocks.length; idx++)
@@ -533,7 +531,7 @@
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfo forceCompleteBlock(final INodeFile fileINode,
+  public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode,
       final BlockInfoUnderConstruction block) throws IOException {
     block.commitBlock(block);
     return completeBlock(fileINode, block, true);
@@ -554,7 +552,7 @@
    * @return the last block locations if the block is partial or null otherwise
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
-      INodeFileUnderConstruction fileINode) throws IOException {
+      MutableBlockCollection fileINode) throws IOException {
     BlockInfo oldBlock = fileINode.getLastBlock();
     if(oldBlock == null ||
         fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
@@ -925,7 +923,7 @@
           " does not exist. ");
     }
-    INodeFile inode = storedBlock.getINode();
+    BlockCollection inode = storedBlock.getINode();
     if (inode == null) {
       NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
           "block " + storedBlock +
@@ -1053,7 +1051,7 @@
     int requiredReplication, numEffectiveReplicas;
     List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
     DatanodeDescriptor srcNode;
-    INodeFile fileINode = null;
+    BlockCollection fileINode = null;
     int additionalReplRequired;
     int scheduledWork = 0;
@@ -1067,7 +1065,7 @@
     // block should belong to a file
     fileINode = blocksMap.getINode(block);
     // abandoned block or block reopened for append
-    if(fileINode == null || fileINode.isUnderConstruction()) {
+    if(fileINode == null || fileINode instanceof MutableBlockCollection) {
       neededReplications.remove(block, priority); // remove from neededReplications
       neededReplications.decrementReplicationIndex(priority);
       continue;
@@ -1153,7 +1151,7 @@
     // block should belong to a file
     fileINode = blocksMap.getINode(block);
     // abandoned block or block reopened for append
-    if(fileINode == null || fileINode.isUnderConstruction()) {
+    if(fileINode == null || fileINode instanceof MutableBlockCollection) {
       neededReplications.remove(block, priority); // remove from neededReplications
       rw.targets = null;
       neededReplications.decrementReplicationIndex(priority);
@@ -1918,7 +1916,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
-      completeBlock(storedBlock.getINode(), storedBlock, false);
+      completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
@@ -1956,7 +1954,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       return block;
     }
     assert storedBlock != null : "Block must be stored by now";
-    INodeFile fileINode = storedBlock.getINode();
+    BlockCollection fileINode = storedBlock.getINode();
     assert fileINode != null : "Block must belong to a file";
     // add block to the datanode
@@ -1983,7 +1981,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
         numLiveReplicas >= minReplication) {
-      storedBlock = completeBlock(fileINode, storedBlock, false);
+      storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that
@@ -1994,7 +1992,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
     // if file is under construction, then done for now
-    if (fileINode.isUnderConstruction()) {
+    if (fileINode instanceof MutableBlockCollection) {
       return storedBlock;
     }
@@ -2131,7 +2129,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    * what happened with it.
    */
   private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
-    INodeFile fileINode = block.getINode();
+    BlockCollection fileINode = block.getINode();
     if (fileINode == null) {
       // block does not belong to any file
       addToInvalidates(block);
@@ -2260,7 +2258,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       BlockPlacementPolicy replicator) {
     assert namesystem.hasWriteLock();
     // first form a rack to datanodes map and
-    INodeFile inode = getINode(b);
+    BlockCollection inode = getINode(b);
     final Map<String, List<DatanodeDescriptor>> rackMap
         = new HashMap<String, List<DatanodeDescriptor>>();
     for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@@ -2381,7 +2379,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     // necessary. In that case, put block on a possibly-will-
     // be-replicated list.
     //
-    INodeFile fileINode = blocksMap.getINode(block);
+    BlockCollection fileINode = blocksMap.getINode(block);
     if (fileINode != null) {
       namesystem.decrementSafeBlockCount(block);
       updateNeededReplications(block, -1, 0);
@@ -2613,7 +2611,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    INodeFile fileINode = blocksMap.getINode(block);
+    BlockCollection fileINode = blocksMap.getINode(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2626,7 +2624,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissionedReplicas()
         + ", excess replicas: " + num.excessReplicas()
-        + ", Is Open File: " + fileINode.isUnderConstruction()
+        + ", Is Open File: " + (fileINode instanceof MutableBlockCollection)
         + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
         + srcNode + ", Is current datanode decommissioning: "
         + srcNode.isDecommissionInProgress());
@@ -2641,7 +2639,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INodeFile fileINode = blocksMap.getINode(block);
+      BlockCollection fileINode = blocksMap.getINode(block);
       short expectedReplication = fileINode.getReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
@@ -2664,7 +2662,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INodeFile fileINode = blocksMap.getINode(block);
+      BlockCollection fileINode = blocksMap.getINode(block);
       if (fileINode != null) {
         NumberReplicas num = countNodes(block);
@@ -2681,7 +2679,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
         if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
           decommissionOnlyReplicas++;
         }
-        if (fileINode.isUnderConstruction()) {
+        if (fileINode instanceof MutableBlockCollection) {
           underReplicatedInOpenFiles++;
         }
       }
@@ -2784,11 +2782,10 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
   /* get replication factor of a block */
   private int getReplication(Block block) {
-    INodeFile fileINode = blocksMap.getINode(block);
+    BlockCollection fileINode = blocksMap.getINode(block);
     if (fileINode == null) { // block does not belong to any file
       return 0;
     }
-    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
     return fileINode.getReplication();
   }
@@ -2861,11 +2858,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     return this.neededReplications.getCorruptBlockSize();
   }
-  public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo block, BlockCollection iNode) {
     return blocksMap.addINode(block, iNode);
   }
-  public INodeFile getINode(Block b) {
+  public BlockCollection getINode(Block b) {
     return blocksMap.getINode(b);
   }
@@ -3005,7 +3002,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
   private static class ReplicationWork {
     private Block block;
-    private INodeFile fileINode;
+    private BlockCollection fileINode;
     private DatanodeDescriptor srcNode;
     private List<DatanodeDescriptor> containingNodes;
@@ -3016,7 +3013,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     private int priority;
     public ReplicationWork(Block block,
-        INodeFile fileINode,
+        BlockCollection fileINode,
         DatanodeDescriptor srcNode,
         List<DatanodeDescriptor> containingNodes,
         List<DatanodeDescriptor> liveReplicaNodes,
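
The recurring substitution in the BlockManager hunks above deserves a note: block management no longer sees INodeFile, so the old fileINode.isUnderConstruction() test becomes a type test, since only collections still being written implement the mutable sub-interface. A minimal sketch of the idiom, assuming the two interfaces introduced by this patch:

  // "Under construction" is now expressed structurally rather than by
  // asking the namespace object:
  static boolean isUnderConstruction(BlockCollection bc) {
    return bc instanceof MutableBlockCollection;
  }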

View File: BlockPlacementPolicy.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -123,13 +122,13 @@ public abstract class BlockPlacementPolicy {
    * @return array of DatanodeDescriptor instances chosen as target
    * and sorted as a pipeline.
    */
-  DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode,
+  DatanodeDescriptor[] chooseTarget(BlockCollection srcInode,
      int numOfReplicas,
      DatanodeDescriptor writer,
      List<DatanodeDescriptor> chosenNodes,
      HashMap<Node, Node> excludedNodes,
      long blocksize) {
-    return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer,
+    return chooseTarget(srcInode.getName(), numOfReplicas, writer,
        chosenNodes, excludedNodes, blocksize);
   }
@@ -159,7 +158,7 @@ public abstract class BlockPlacementPolicy {
      listed in the previous parameter.
    * @return the replica that is the best candidate for deletion
    */
-  abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode,
+  abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode,
      Block block,
      short replicationFactor,
      Collection<DatanodeDescriptor> existingReplicas,

View File: BlockPlacementPolicyDefault.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -547,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
      Block block,
      short replicationFactor,
      Collection<DatanodeDescriptor> first,

View File: BlocksMap.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Iterator;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.GSet;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
@@ -93,7 +92,7 @@ class BlocksMap {
     blocks = null;
   }
-  INodeFile getINode(Block b) {
+  BlockCollection getINode(Block b) {
     BlockInfo info = blocks.get(b);
     return (info != null) ? info.getINode() : null;
   }
@@ -101,7 +100,7 @@
   /**
    * Add block b belonging to the specified file inode to the map.
    */
-  BlockInfo addINode(BlockInfo b, INodeFile iNode) {
+  BlockInfo addINode(BlockInfo b, BlockCollection iNode) {
     BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;

View File: MutableBlockCollection.java (new file)

@@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.fs.ContentSummary;
/**
* This interface is used by the block manager to expose a
* few characteristics of a collection of Block/BlockUnderConstruction.
*/
public interface MutableBlockCollection extends BlockCollection {
/**
* Set block
*/
public void setBlock(int idx, BlockInfo blk);
/**
* Convert the last block of the collection to an under-construction block.
* Set its locations.
*/
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeDescriptor[] targets) throws IOException;
}
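
A hedged sketch, with the hypothetical method name convertLastBlock, of how a caller such as BlockManager.convertLastBlockToUnderConstruction (changed above) can drive this interface:

  static BlockInfoUnderConstruction convertLastBlock(
      MutableBlockCollection fileINode, DatanodeDescriptor[] targets)
      throws IOException {
    BlockInfo oldBlock = fileINode.getLastBlock();
    if (oldBlock == null
        || fileINode.getPreferredBlockSize() == oldBlock.getNumBytes()) {
      return null; // last block absent or full; nothing to convert
    }
    // Replace the completed last block with an under-construction copy
    // and record the expected replica locations.
    return fileINode.setLastBlock(oldBlock, targets);
  }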

View File: FSInodeInfo.java (deleted)

@@ -1,38 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* This interface is used by the pluggable block placement policy
* to expose a few characteristics of an Inode.
*/
@InterfaceAudience.Private
public interface FSInodeInfo {
/**
* a string representation of an inode
*
* @return the full pathname (from root) that this inode represents
*/
public String getFullPathName() ;
}

View File: FSNamesystem.java

@@ -2840,7 +2840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (storedBlock == null) {
       throw new IOException("Block (=" + lastblock + ") not found");
     }
-    INodeFile iFile = storedBlock.getINode();
+    INodeFile iFile = (INodeFile) storedBlock.getINode();
     if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
       throw new IOException("Unexpected block (=" + lastblock
           + ") since the file (=" + iFile.getLocalName()
@@ -4394,7 +4394,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     // check file inode
-    INodeFile file = storedBlock.getINode();
+    INodeFile file = (INodeFile) storedBlock.getINode();
     if (file==null || !file.isUnderConstruction()) {
       throw new IOException("The file " + storedBlock +
           " belonged to does not exist or it is not under construction.");
@@ -4706,7 +4706,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     while (blkIterator.hasNext()) {
       Block blk = blkIterator.next();
-      INode inode = blockManager.getINode(blk);
+      INode inode = (INodeFile) blockManager.getINode(blk);
       skip++;
       if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
         String src = FSDirectory.getFullPathName(inode);
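
Where namesystem code still needs the concrete inode, as in the three hunks above, the BlockCollection returned by the block manager is downcast back to INodeFile. A defensive variant of that pattern, sketched with the hypothetical helper asINodeFile (the patch itself uses a bare cast):

  static INodeFile asINodeFile(BlockInfo storedBlock) throws IOException {
    BlockCollection bc = storedBlock.getINode();
    if (!(bc instanceof INodeFile)) {
      throw new IOException("Block " + storedBlock
          + " does not belong to a file inode");
    }
    return (INodeFile) bc;
  }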

View File: INode.java

@@ -38,7 +38,7 @@ import com.google.common.primitives.SignedBytes;
  * directory inodes.
  */
 @InterfaceAudience.Private
-abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+abstract class INode implements Comparable<byte[]> {
   /*
    * The inode name is in java UTF8 encoding;
    * The name in HdfsFileStatus should keep the same encoding as this.
@@ -264,7 +264,6 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
     this.name = name;
   }
-  @Override
   public String getFullPathName() {
     // Get the full path name of this inode.
     return FSDirectory.getFullPathName(this);

View File: INodeFile.java

@@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 /** I-node for closed file. */
-public class INodeFile extends INode {
+@InterfaceAudience.Private
+public class INodeFile extends INode implements BlockCollection {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
   //Number of bits for Block size
@@ -167,6 +170,12 @@ public class INodeFile extends INode {
     blocks = null;
     return 1;
   }
+  public String getName() {
+    // Get the full path name of this inode.
+    return getFullPathName();
+  }
   @Override
   long[] computeContentSummary(long[] summary) {

View File: INodeFileUnderConstruction.java

@@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 import com.google.common.base.Joiner;
 /**
  * I-node for file being written.
  */
-public class INodeFileUnderConstruction extends INodeFile {
+public class INodeFileUnderConstruction extends INodeFile
+    implements MutableBlockCollection {
   private String clientName; // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.

View File: NamenodeJspHelper.java

@@ -734,7 +734,7 @@ class NamenodeJspHelper {
       this.inode = null;
     } else {
       this.block = new Block(blockId);
-      this.inode = blockManager.getINode(block);
+      this.inode = (INodeFile) blockManager.getINode(block);
     }
   }

View File: TestDNFencing.java

@@ -46,9 +46,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -585,7 +585,7 @@ public class TestDNFencing {
     }
     @Override
-    public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+    public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
        Block block, short replicationFactor,
        Collection<DatanodeDescriptor> first,
        Collection<DatanodeDescriptor> second) {