HDFS-10551. o.a.h.h.s.diskbalancer.command.Command does not actually verify options as expected. Contributed by Anu Engineer.

Anu Engineer 2016-06-22 17:29:34 -07:00 committed by Arpit Agarwal
parent 66fa34c839
commit e8de28181a
7 changed files with 141 additions and 25 deletions

org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java

@@ -82,8 +82,6 @@ public abstract class Command extends Configured {
public Command(Configuration conf) {
super(conf);
// These arguments are valid for all commands.
addValidCommandParameters(DiskBalancer.HELP, "Help for this command");
addValidCommandParameters("arg", "");
topNodes = 0;
}
@@ -248,12 +246,13 @@ public abstract class Command extends Configured {
Iterator<Option> iter = cmd.iterator();
while (iter.hasNext()) {
Option opt = iter.next();
if (!validArgs.containsKey(opt.getArgName())) {
if (!validArgs.containsKey(opt.getLongOpt())) {
String errMessage = String
.format("%nInvalid argument found for command %s : %s%n",
commandName, opt.getArgName());
commandName, opt.getLongOpt());
StringBuilder validArguments = new StringBuilder();
validArguments.append("Valid arguments are : %n");
validArguments.append(String.format("Valid arguments are : %n"));
for (Map.Entry<String, String> args : validArgs.entrySet()) {
String key = args.getKey();
String desc = args.getValue();

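The heart of the fix is the loop above. In Commons CLI, Option.getArgName() is only the display name of an option's value used when printing help (it defaults to "arg" in CLI 1.2, which is exactly why the old base constructor had to whitelist "arg"), so keying the validity check on it never distinguished one option from another; Option.getLongOpt() returns the name the option was actually registered under. A self-contained sketch of the corrected pattern, mirroring the -plan/-report conflict exercised by testIllegalArgument further down (the class, map, and sample values here are illustrative, not Hadoop code):

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;

public class ValidateSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption(OptionBuilder.withLongOpt("plan").hasArg()
        .withDescription("creates a plan").create());
    opts.addOption(OptionBuilder.withLongOpt("report")
        .withDescription("prints a report").create());

    // Table of options the "plan" command considers legal.
    Map<String, String> validArgs = new HashMap<String, String>();
    validArgs.put("plan", "creates a plan");

    CommandLine cmd = new GnuParser()
        .parse(opts, new String[] {"-plan", "node1", "-report"});
    for (Option opt : cmd.getOptions()) {
      // getArgName() would return the value's display name ("arg"),
      // which is the same for every option; getLongOpt() returns the
      // name the option was registered with, so the lookup works.
      if (!validArgs.containsKey(opt.getLongOpt())) {
        throw new IllegalArgumentException(
            "Invalid argument found for command plan : " + opt.getLongOpt());
      }
    }
  }
}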
org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java

@@ -47,7 +47,6 @@ public class ExecuteCommand extends Command {
public ExecuteCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
addValidCommandParameters(DiskBalancer.NODE, "Name of the target node.");
}
/**

org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java

@@ -37,6 +37,7 @@ public class HelpCommand extends Command {
*/
public HelpCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.HELP, "Help Command");
}
/**

org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java

@@ -74,6 +74,7 @@ public class PlanCommand extends Command {
"between 2 disks");
addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
"verbose mode.");
addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
}
/**

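Taken together, the three constructor changes above establish the new contract: the base class registers nothing, and every concrete command declares exactly the flags it accepts. A hypothetical new subclass would follow the same pattern (FooCommand and the FOO flag are invented for illustration and do not exist in the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DiskBalancer;

public class FooCommand extends Command {
  public FooCommand(Configuration conf) {
    super(conf);
    // Register only the flags this command understands; any other
    // option on the command line now fails validation with an
    // IllegalArgumentException. DiskBalancer.FOO is hypothetical.
    addValidCommandParameters(DiskBalancer.FOO, "Runs the foo operation.");
    addValidCommandParameters(DiskBalancer.VERBOSE,
        "Run foo command in verbose mode.");
  }
  // Abstract members of Command (execution, help text) omitted here.
}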
org/apache/hadoop/hdfs/tools/DiskBalancer.java

@@ -399,13 +399,19 @@ public class DiskBalancer extends Configured implements Tool {
getReportOptions().addOption(report);
opt.addOption(report);
Option top = new Option(TOP, true,
"specify the number of nodes to be listed which has data imbalance.");
Option top = OptionBuilder.withLongOpt(TOP)
.hasArg()
.withDescription("specify the number of nodes to be listed which has" +
" data imbalance.")
.create();
getReportOptions().addOption(top);
opt.addOption(top);
Option node = new Option(NODE, true,
"Datanode address, it can be DataNodeID, IP or hostname.");
Option node = OptionBuilder.withLongOpt(NODE)
.hasArg()
.withDescription("Datanode address, " +
"it can be DataNodeID, IP or hostname.")
.create();
getReportOptions().addOption(node);
opt.addOption(node);
}

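This rebuild of -top and -node is the companion to the getLongOpt() change in Command: the three-argument Option constructor registers its name as the short option and leaves the long option unset, so getLongOpt() returns null and the new containsKey(opt.getLongOpt()) check would reject even legal uses of these flags. A small sketch of the contrast (assuming Commons CLI 1.x, which Hadoop bundles; the class and variable names are illustrative):

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;

public class OptionStyles {
  public static void main(String[] args) {
    // Old style: "top" becomes the *short* option name and no long
    // option is set, so the validation table can never match it.
    Option oldTop = new Option("top", true, "number of nodes to list");
    System.out.println(oldTop.getLongOpt());   // prints: null

    // New style: "top" is registered as the long option, which is
    // what the validation loop in Command now keys on.
    Option newTop = OptionBuilder.withLongOpt("top")
        .hasArg()
        .withDescription("number of nodes to list")
        .create();
    System.out.println(newTop.getLongOpt());   // prints: top
  }
}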
hdfs-default.xml

@@ -4086,4 +4086,44 @@
Truststore password for HTTPS SSL configuration
</description>
</property>
<!--Disk baalncer properties-->
<property>
<name>dfs.disk.balancer.max.disk.throughputInMBperSec</name>
<value>10</value>
<description>Maximum disk bandwidth used by diskbalancer
during read from a source disk. The unit is MB/sec.
</description>
</property>
<property>
<name>dfs.disk.balancer.block.tolerance.percent</name>
<value>10</value>
<description>
When a disk balancer copy operation is proceeding, the datanode is still
active, so it might not be possible to move exactly the specified
amount of data. This tolerance defines the percentage of deviation from
the plan that is still considered a good enough move.
</description>
</property>
<property>
<name>dfs.disk.balancer.max.disk.errors</name>
<value>5</value>
<description>
During a block move from a source to a destination disk, we might
encounter various errors. This defines how many errors we can tolerate
before we declare a move between two disks (or a step) to have failed.
</description>
</property>
<property>
<name>dfs.disk.balancer.enabled</name>
<value>false</value>
<description>
This enables the disk balancer feature on a cluster. By default, the
disk balancer is disabled.
</description>
</property>
</configuration>

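The new keys resolve through the ordinary Configuration chain, so they can be set in hdfs-site.xml or overridden in code like any other HDFS setting. A short illustrative example (the key names come from the block above; the class and chosen values are mine):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DiskBalancerSettings {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // The feature ships disabled; turn it on and adjust the copy limits.
    conf.setBoolean("dfs.disk.balancer.enabled", true);
    conf.setLong("dfs.disk.balancer.max.disk.throughputInMBperSec", 20);
    conf.setInt("dfs.disk.balancer.block.tolerance.percent", 5);
    conf.setInt("dfs.disk.balancer.max.disk.errors", 3);
    System.out.println(conf.getBoolean("dfs.disk.balancer.enabled", false));
  }
}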
TestDiskBalancerCommand.java

@@ -5,9 +5,9 @@
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -44,16 +44,27 @@ import org.junit.Test;
import com.google.common.collect.Lists;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
/**
* Tests various CLI commands of DiskBalancer.
*/
public class TestDiskBalancerCommand {
@Rule
public ExpectedException thrown = ExpectedException.none();
private MiniDFSCluster cluster;
private URI clusterJson;
private Configuration conf = new HdfsConfiguration();
@Before
public void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.storagesPerDatanode(2).build();
@@ -73,7 +84,7 @@ public class TestDiskBalancerCommand {
}
/* test basic report */
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReportSimple() throws Exception {
final String cmdLine = "hdfs diskbalancer -report";
final List<String> outputs = runCommand(cmdLine);
@@ -101,7 +112,7 @@ public class TestDiskBalancerCommand {
}
/* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReportLessThanTotal() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top 32";
final List<String> outputs = runCommand(cmdLine);
@@ -124,7 +135,7 @@ public class TestDiskBalancerCommand {
}
/* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReportMoreThanTotal() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top 128";
final List<String> outputs = runCommand(cmdLine);
@@ -148,7 +159,7 @@ public class TestDiskBalancerCommand {
}
/* test invalid top limit, e.g., -report -top xx */
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReportInvalidTopLimit() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top xx";
final List<String> outputs = runCommand(cmdLine);
@@ -174,10 +185,10 @@ public class TestDiskBalancerCommand {
containsString("9 volumes with node data density 1.97"))));
}
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReportNode() throws Exception {
final String cmdLine =
"hdfs diskbalancer -report -node " +
"hdfs diskbalancer -report -node " +
"a87654a9-54c7-4693-8dd9-c9c7021dc340";
final List<String> outputs = runCommand(cmdLine);
@@ -249,11 +260,8 @@ public class TestDiskBalancerCommand {
containsString("0.25 free: 490407853993/2000000000000"))));
}
@Test(timeout=60000)
@Test(timeout = 60000)
public void testReadClusterFromJson() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson,
conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(
@@ -262,10 +270,72 @@ public class TestDiskBalancerCommand {
assertEquals(64, diskBalancerCluster.getNodes().size());
}
private List<String> runCommand(final String cmdLine) throws Exception {
/* test -plan DataNodeID */
@Test(timeout = 60000)
public void testPlanNode() throws Exception {
final String planArg = String.format("-%s %s", PLAN,
cluster.getDataNodes().get(0).getDatanodeUuid());
final String cmdLine = String
.format(
"hdfs diskbalancer %s", planArg);
runCommand(cmdLine);
}
/* Test that illegal arguments are handled correctly */
@Test(timeout = 60000)
public void testIllegalArgument() throws Exception {
final String planArg = String.format("-%s %s", PLAN,
"a87654a9-54c7-4693-8dd9-c9c7021dc340");
final String cmdLine = String
.format(
"hdfs diskbalancer %s -report", planArg);
// -plan and -report cannot be used together.
// tests the validate command line arguments function.
thrown.expect(java.lang.IllegalArgumentException.class);
runCommand(cmdLine);
}
@Test(timeout = 60000)
public void testCancelCommand() throws Exception {
final String cancelArg = String.format("-%s %s", CANCEL, "nosuchplan");
final String nodeArg = String.format("-%s %s", NODE,
cluster.getDataNodes().get(0).getDatanodeUuid());
// Host:Port format is expected, so the cancel command will throw.
thrown.expect(java.lang.IllegalArgumentException.class);
final String cmdLine = String
.format(
"hdfs diskbalancer %s %s", cancelArg, nodeArg);
runCommand(cmdLine);
}
/*
Makes an invalid query attempt against a non-existent datanode.
*/
@Test(timeout = 60000)
public void testQueryCommand() throws Exception {
final String queryArg = String.format("-%s %s", QUERY,
cluster.getDataNodes().get(0).getDatanodeUuid());
thrown.expect(java.net.UnknownHostException.class);
final String cmdLine = String
.format(
"hdfs diskbalancer %s", queryArg);
runCommand(cmdLine);
}
@Test(timeout = 60000)
public void testHelpCommand() throws Exception {
final String helpArg = String.format("-%s", HELP);
final String cmdLine = String
.format(
"hdfs diskbalancer %s", helpArg);
runCommand(cmdLine);
}
private List<String> runCommand(final String cmdLine) throws Exception {
String[] cmds = StringUtils.split(cmdLine, ' ');
Configuration conf = new HdfsConfiguration();
org.apache.hadoop.hdfs.tools.DiskBalancer db =
new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);