HDFS-3816. Merging 1374645 from trunk to branch-2

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1374648 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2012-08-18 19:57:36 +00:00
parent 5eebedb974
commit b082909c32
5 changed files with 97 additions and 18 deletions

View File

@ -476,6 +476,9 @@ Release 2.0.1-alpha - UNRELEASED
HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header
when chunked transfer-encoding is used. (szetszwo) when chunked transfer-encoding is used. (szetszwo)
HDFS-3816. Invalidate work percentage default value should be 0.32f
instead of 32. (Jing Zhao via suresh)
BREAKDOWN OF HDFS-3042 SUBTASKS BREAKDOWN OF HDFS-3042 SUBTASKS
HDFS-2185. HDFS portion of ZK-based FailoverController (todd) HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

View File

@ -178,7 +178,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
// Replication monitoring related keys // Replication monitoring related keys
public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION = public static final String DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION =
"dfs.namenode.invalidate.work.pct.per.iteration"; "dfs.namenode.invalidate.work.pct.per.iteration";
public static final int DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT = 32; public static final float DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT = 0.32f;
public static final String DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION = public static final String DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION =
"dfs.namenode.replication.work.multiplier.per.iteration"; "dfs.namenode.replication.work.multiplier.per.iteration";
public static final int DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT = 2; public static final int DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT = 2;

View File

@ -86,6 +86,7 @@ import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import com.google.protobuf.BlockingService; import com.google.protobuf.BlockingService;
@ -1149,4 +1150,42 @@ public class DFSUtil {
} }
return false; return false;
} }
/**
 * Reads the invalidation work percentage
 * (DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION) from the supplied
 * configuration and validates that it is positive.
 *
 * @param conf Configuration to read the key from
 * @return the configured percentage value, guaranteed to be &gt; 0
 * @throws IllegalArgumentException if the configured value is zero or negative
 */
public static float getInvalidateWorkPctPerIteration(Configuration conf) {
  final float pct = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
  // Fail fast on a misconfigured (non-positive) percentage rather than
  // letting the block manager silently do no invalidation work.
  Preconditions.checkArgument(pct > 0,
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
      " = '" + pct + "' is invalid. " +
      "It should be a positive, non-zero float value " +
      "indicating a percentage.");
  return pct;
}
/**
 * Reads the replication work multiplier
 * (DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION) from the
 * supplied configuration and validates that it is positive.
 *
 * @param conf Configuration to read the key from
 * @return the configured multiplier, guaranteed to be &gt; 0
 * @throws IllegalArgumentException if the configured value is zero or negative
 */
public static int getReplWorkMultiplier(Configuration conf) {
  final int multiplier = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
  // Fail fast on a misconfigured (non-positive) multiplier rather than
  // letting the block manager schedule no replication work.
  Preconditions.checkArgument(multiplier > 0,
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
      " = '" + multiplier + "' is invalid. " +
      "It should be a positive, non-zero integer value.");
  return multiplier;
}
} }

View File

@ -269,23 +269,8 @@ public class BlockManager {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT); DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null; this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
this.blocksInvalidateWorkPct = conf.getFloat( this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
Preconditions.checkArgument(
(this.blocksInvalidateWorkPct > 0),
DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
" = '" + this.blocksInvalidateWorkPct + "' is invalid. " +
"It should be a positive, non-zero float value " +
"indicating a percentage.");
this.blocksReplWorkMultiplier = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
Preconditions.checkArgument(
(this.blocksReplWorkMultiplier > 0),
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
" = '" + this.blocksReplWorkMultiplier + "' is invalid. " +
"It should be a positive, non-zero integer value.");
this.replicationRecheckInterval = this.replicationRecheckInterval =
conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,

View File

@ -41,7 +41,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.ExpectedException;
public class TestReplicationPolicy { public class TestReplicationPolicy {
private Random random = DFSUtil.getRandom(); private Random random = DFSUtil.getRandom();
@ -53,6 +55,9 @@ public class TestReplicationPolicy {
private static final String filename = "/dummyfile.txt"; private static final String filename = "/dummyfile.txt";
private static DatanodeDescriptor dataNodes[]; private static DatanodeDescriptor dataNodes[];
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass @BeforeClass
public static void setupCluster() throws Exception { public static void setupCluster() throws Exception {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@ -588,4 +593,51 @@ public class TestReplicationPolicy {
fifthPrioritySize, chosenBlocks.get( fifthPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size()); UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
} }
/**
 * Tests that the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive, that an
 * explicitly configured value is parsed and returned, and that an
 * IllegalArgumentException is thrown when a non-positive value is
 * configured.
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
Configuration conf = new Configuration();
// With no explicit setting, the built-in default must be positive.
float blocksInvalidateWorkPct = DFSUtil
.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
// An explicitly configured value should be returned verbatim.
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
"0.5f");
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(blocksInvalidateWorkPct, 0.5f, blocksInvalidateWorkPct * 1e-7);
// A zero percentage is invalid; the ExpectedException rule verifies the
// next call throws IllegalArgumentException (nothing after it runs).
conf.set(DFSConfigKeys.
DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0");
exception.expect(IllegalArgumentException.class);
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
/**
 * Tests that the default value returned by
 * DFSUtil.getReplWorkMultiplier() is positive, that an explicitly
 * configured value is parsed and returned, and that an
 * IllegalArgumentException is thrown when a non-positive value is
 * configured.
 */
@Test
public void testGetReplWorkMultiplier() {
Configuration conf = new Configuration();
// With no explicit setting, the built-in default must be positive.
int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
assertTrue(blocksReplWorkMultiplier > 0);
// An explicitly configured value should be returned verbatim.
conf.set(DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
assertEquals(blocksReplWorkMultiplier, 3);
// A negative multiplier is invalid; the ExpectedException rule verifies
// the next call throws IllegalArgumentException (nothing after it runs).
conf.set(DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
exception.expect(IllegalArgumentException.class);
blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
} }