diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f3b97df0fa7..1f7a9381209 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7790. Do not create optional fields in DFSInputStream unless they are
needed (cmccabe)
+ HDFS-316. Balancer should run for a configurable # of iterations (Xiaoyu
+ Yao via aw)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index eeac6ee24b9..1075861dcea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -75,6 +75,10 @@ import com.google.common.base.Preconditions;
* start the balancer with a default threshold of 10%
* bin/ start-balancer.sh -threshold 5
* start the balancer with a threshold of 5%
+ * bin/ start-balancer.sh -idleiterations 20
+ * start the balancer with maximum 20 consecutive idle iterations
+ * bin/ start-balancer.sh -idleiterations -1
+ * run the balancer with default threshold infinitely
* To stop:
* bin/ stop-balancer.sh
*
@@ -137,7 +141,7 @@ import com.google.common.base.Preconditions;
*
* - The cluster is balanced;
*
- No block can be moved;
- *
- No block has been moved for five consecutive iterations;
+ *
- No block has been moved for specified consecutive iterations (5 by default);
*
- An IOException occurs while communicating with the namenode;
*
- Another balancer is running.
*
@@ -148,7 +152,7 @@ import com.google.common.base.Preconditions;
*
* - The cluster is balanced. Exiting
*
- No block can be moved. Exiting...
- *
- No block has been moved for 5 iterations. Exiting...
+ *
- No block has been moved for specified iterations (5 by default). Exiting...
*
- Received an IO exception: failure reason. Exiting...
*
- Another balancer is running. Exiting...
*
@@ -176,7 +180,9 @@ public class Balancer {
+ "\n\t[-exclude [-f | comma-sperated list of hosts]]"
+ "\tExcludes the specified datanodes."
+ "\n\t[-include [-f | comma-sperated list of hosts]]"
- + "\tIncludes only the specified datanodes.";
+ + "\tIncludes only the specified datanodes."
+ + "\n\t[-idleiterations <idleiterations>]"
+ + "\tNumber of consecutive idle iterations (-1 for Infinite) before exit.";
private final Dispatcher dispatcher;
private final BalancingPolicy policy;
@@ -573,7 +579,7 @@ public class Balancer {
List connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf);
+ Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, p.maxIdleIteration);
boolean done = false;
for(int iteration = 0; !done; iteration++) {
@@ -629,19 +635,22 @@ public class Balancer {
static class Parameters {
static final Parameters DEFAULT = new Parameters(
BalancingPolicy.Node.INSTANCE, 10.0,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
Collections. emptySet(), Collections. emptySet());
final BalancingPolicy policy;
final double threshold;
+ final int maxIdleIteration;
// exclude the nodes in this set from balancing operations
Set nodesToBeExcluded;
//include only these nodes in balancing operations
Set nodesToBeIncluded;
- Parameters(BalancingPolicy policy, double threshold,
+ Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
Set nodesToBeExcluded, Set nodesToBeIncluded) {
this.policy = policy;
this.threshold = threshold;
+ this.maxIdleIteration = maxIdleIteration;
this.nodesToBeExcluded = nodesToBeExcluded;
this.nodesToBeIncluded = nodesToBeIncluded;
}
@@ -650,6 +659,7 @@ public class Balancer {
public String toString() {
return Balancer.class.getSimpleName() + "." + getClass().getSimpleName()
+ "[" + policy + ", threshold=" + threshold +
+ ", max idle iteration = " + maxIdleIteration +
", number of nodes to be excluded = "+ nodesToBeExcluded.size() +
", number of nodes to be included = "+ nodesToBeIncluded.size() +"]";
}
@@ -688,6 +698,7 @@ public class Balancer {
static Parameters parse(String[] args) {
BalancingPolicy policy = Parameters.DEFAULT.policy;
double threshold = Parameters.DEFAULT.threshold;
+ int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
Set nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded;
Set nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded;
@@ -743,6 +754,11 @@ public class Balancer {
} else {
nodesTobeIncluded = Util.parseHostList(args[i]);
}
+ } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
+ checkArgument(++i < args.length,
+ "idleiterations value is missing: args = " + Arrays.toString(args));
+ maxIdleIteration = Integer.parseInt(args[i]);
+ LOG.info("Using idleiterations of " + maxIdleIteration);
} else {
throw new IllegalArgumentException("args = "
+ Arrays.toString(args));
@@ -756,7 +772,7 @@ public class Balancer {
}
}
- return new Parameters(policy, threshold, nodesTobeExcluded, nodesTobeIncluded);
+ return new Parameters(policy, threshold, maxIdleIteration, nodesTobeExcluded, nodesTobeIncluded);
}
private static void printUsage(PrintStream out) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index 1b34777823e..a2712666078 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -61,18 +61,18 @@ import com.google.common.annotations.VisibleForTesting;
public class NameNodeConnector implements Closeable {
private static final Log LOG = LogFactory.getLog(NameNodeConnector.class);
- private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
+ public static final int DEFAULT_MAX_IDLE_ITERATIONS = 5;
private static boolean write2IdFile = true;
/** Create {@link NameNodeConnector} for the given namenodes. */
public static List<NameNodeConnector> newNameNodeConnectors(
- Collection<URI> namenodes, String name, Path idPath, Configuration conf)
- throws IOException {
+ Collection<URI> namenodes, String name, Path idPath, Configuration conf,
+ int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (URI uri : namenodes) {
NameNodeConnector nnc = new NameNodeConnector(name, uri, idPath,
- null, conf);
+ null, conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
@@ -81,12 +81,12 @@ public class NameNodeConnector implements Closeable {
public static List<NameNodeConnector> newNameNodeConnectors(
Map<URI, List<Path>> namenodes, String name, Path idPath,
- Configuration conf) throws IOException {
+ Configuration conf, int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (Map.Entry> entry : namenodes.entrySet()) {
NameNodeConnector nnc = new NameNodeConnector(name, entry.getKey(),
- idPath, entry.getValue(), conf);
+ idPath, entry.getValue(), conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
@@ -112,15 +112,18 @@ public class NameNodeConnector implements Closeable {
private final List targetPaths;
private final AtomicLong bytesMoved = new AtomicLong();
+ private final int maxNotChangedIterations;
private int notChangedIterations = 0;
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
- List targetPaths, Configuration conf)
+ List targetPaths, Configuration conf,
+ int maxNotChangedIterations)
throws IOException {
this.nameNodeUri = nameNodeUri;
this.idPath = idPath;
this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
.asList(new Path("/")) : targetPaths;
+ this.maxNotChangedIterations = maxNotChangedIterations;
this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
NamenodeProtocol.class).getProxy();
@@ -183,7 +186,14 @@ public class NameNodeConnector implements Closeable {
notChangedIterations = 0;
} else {
notChangedIterations++;
- if (notChangedIterations >= MAX_NOT_CHANGED_ITERATIONS) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("No block has been moved for " +
+ notChangedIterations + " iterations, " +
+ "maximum notChangedIterations before exit is: " +
+ ((maxNotChangedIterations >= 0) ? maxNotChangedIterations : "Infinite"));
+ }
+ if ((maxNotChangedIterations >= 0) &&
+ (notChangedIterations >= maxNotChangedIterations)) {
System.out.println("No block has been moved for "
+ notChangedIterations + " iterations. Exiting...");
return false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index b0e9dda84d1..346d511b3fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -530,7 +530,8 @@ public class Mover {
List connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Mover.class.getSimpleName(), MOVER_ID_PATH, conf);
+ Mover.class.getSimpleName(), MOVER_ID_PATH, conf,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
while (connectors.size() > 0) {
Collections.shuffle(connectors);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
index 17a0d62c740..aa7c7a7c7e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
@@ -134,7 +134,7 @@ HDFS Commands Guide
to stop the rebalancing process. See
{{{./HdfsUserGuide.html#Balancer}Balancer}} for more details.
- Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>]>>>
+ Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>] [-idleiterations <idleiterations>]>>>
*------------------------+----------------------------------------------------+
|| COMMAND_OPTION | Description
@@ -146,6 +146,9 @@ HDFS Commands Guide
| | each datanode is balanced. \
| | <<>>: Cluster is balanced if each block
| | pool in each datanode is balanced.
+*------------------------+----------------------------------------------------+
+| -idleiterations | Maximum number of idle iterations before exit.
+| | This overrides the default number of idle iterations (5).
*------------------------+----------------------------------------------------+
Note that the <<>> policy is more strict than the <<>>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index a5346b2c924..1d818c5701c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -613,6 +613,7 @@ public class TestBalancer {
p = new Balancer.Parameters(
Balancer.Parameters.DEFAULT.policy,
Balancer.Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded());
}
@@ -678,7 +679,8 @@ public class TestBalancer {
List connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf);
+ Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf,
+ Balancer.Parameters.DEFAULT.maxIdleIteration);
boolean done = false;
for(int iteration = 0; !done; iteration++) {
@@ -850,6 +852,7 @@ public class TestBalancer {
Balancer.Parameters p = new Balancer.Parameters(
Balancer.Parameters.DEFAULT.policy,
Balancer.Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
final int r = Balancer.run(namenodes, p, conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
@@ -1284,6 +1287,7 @@ public class TestBalancer {
Balancer.Parameters p = new Balancer.Parameters(
Parameters.DEFAULT.policy,
Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
Parameters.DEFAULT.nodesToBeExcluded,
Parameters.DEFAULT.nodesToBeIncluded);
final int r = Balancer.run(namenodes, p, conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index c9fc5bafbfa..f35e1c88bd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.URI;
import java.util.*;
+import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
@@ -39,9 +40,14 @@ public class TestMover {
static Mover newMover(Configuration conf) throws IOException {
final Collection namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
+ Map<URI, List<Path>> nnMap = Maps.newHashMap();
+ for (URI nn : namenodes) {
+ nnMap.put(nn, null);
+ }
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
- namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf);
+ nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
return new Mover(nncs.get(0), conf);
}