svn merge -c 1380939 from trunk for HDFS-3888. Clean up BlockPlacementPolicyDefault.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1380940 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-09-05 00:04:11 +00:00
parent 555e93493e
commit 1da2d2f37d
2 changed files with 24 additions and 12 deletions

View File

@@ -242,6 +242,8 @@ Release 2.0.1-alpha - UNRELEASED
HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy.
(Jing Zhao via szetszwo)
HDFS-3888. Clean up BlockPlacementPolicyDefault. (Jing Zhao via szetszwo)
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log

View File

@@ -27,8 +27,6 @@
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -55,9 +53,6 @@
@InterfaceAudience.Private
public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
private static final Log LOG =
LogFactory.getLog(BlockPlacementPolicyDefault.class.getName());
private static final String enableDebugLogging =
"For more information, please enable DEBUG log level on "
+ LOG.getClass().getName();
@@ -124,7 +119,6 @@ public DatanodeDescriptor[] chooseTarget(String srcPath,
excludedNodes, blocksize);
}
/** This is the implementation. */
DatanodeDescriptor[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer,
@@ -161,7 +155,8 @@ DatanodeDescriptor[] chooseTarget(int numOfReplicas,
}
DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer,
excludedNodes, blocksize, maxNodesPerRack, results);
excludedNodes, blocksize,
maxNodesPerRack, results);
if (!returnChosenNodes) {
results.removeAll(chosenNodes);
}
@@ -438,14 +433,29 @@ private void chooseRandom(int numOfReplicas,
* does not have too much load, and the rack does not have too many nodes
*/
private boolean isGoodTarget(DatanodeDescriptor node,
long blockSize, int maxTargetPerLoc,
long blockSize, int maxTargetPerRack,
List<DatanodeDescriptor> results) {
return isGoodTarget(node, blockSize, maxTargetPerLoc,
return isGoodTarget(node, blockSize, maxTargetPerRack,
this.considerLoad, results);
}
/**
* Determine if a node is a good target.
*
* @param node The target node
* @param blockSize Size of block
* @param maxTargetPerRack Maximum number of targets per rack. The value of
* this parameter depends on the number of racks in
* the cluster and total number of replicas for a block
* @param considerLoad whether or not to consider load of the target node
* @param results A list containing currently chosen nodes. Used to check if
* too many nodes has been chosen in the target rack.
* @return Return true if <i>node</i> has enough space,
* does not have too much load,
* and the rack does not have too many nodes.
*/
private boolean isGoodTarget(DatanodeDescriptor node,
long blockSize, int maxTargetPerLoc,
long blockSize, int maxTargetPerRack,
boolean considerLoad,
List<DatanodeDescriptor> results) {
// check if the node is (being) decommissed
@@ -497,7 +507,7 @@ private boolean isGoodTarget(DatanodeDescriptor node,
counter++;
}
}
if (counter>maxTargetPerLoc) {
if (counter>maxTargetPerRack) {
if(LOG.isDebugEnabled()) {
threadLocalBuilder.get().append(node.toString()).append(": ")
.append("Node ").append(NodeBase.getPath(node))