HDFS-10551. o.a.h.h.s.diskbalancer.command.Command does not actually verify options as expected. Contributed by Anu Engineer.

Anu Engineer 2016-06-22 17:29:34 -07:00 committed by Arpit Agarwal
parent 66fa34c839
commit e8de28181a
7 changed files with 141 additions and 25 deletions

org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java

@@ -82,8 +82,6 @@ public abstract class Command extends Configured {
   public Command(Configuration conf) {
     super(conf);
     // These arguments are valid for all commands.
-    addValidCommandParameters(DiskBalancer.HELP, "Help for this command");
-    addValidCommandParameters("arg", "");
     topNodes = 0;
   }
@@ -248,12 +246,13 @@ public abstract class Command extends Configured {
     Iterator<Option> iter = cmd.iterator();
     while (iter.hasNext()) {
       Option opt = iter.next();
-      if (!validArgs.containsKey(opt.getArgName())) {
+      if (!validArgs.containsKey(opt.getLongOpt())) {
         String errMessage = String
             .format("%nInvalid argument found for command %s : %s%n",
-                commandName, opt.getArgName());
+                commandName, opt.getLongOpt());
         StringBuilder validArguments = new StringBuilder();
-        validArguments.append("Valid arguments are : %n");
+        validArguments.append(String.format("Valid arguments are : %n"));
         for (Map.Entry<String, String> args : validArgs.entrySet()) {
           String key = args.getKey();
           String desc = args.getValue();

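Why the old check never fired: in commons-cli (the 1.2-era API Hadoop shipped at the time, an assumption here), Option.getArgName() returns the display name of the option's value placeholder, which defaults to "arg" for every option; it is not the option's name. That is also why the base class had to whitelist "arg" with an empty description, which effectively let every option pass. A minimal sketch of the mismatch, assuming only commons-cli on the classpath:

    import org.apache.commons.cli.Option;

    public class GetArgNameVsLongOpt {
      public static void main(String[] args) {
        // An option built the old way: short name only, takes a value.
        Option top = new Option("top", true, "number of nodes to list");

        // getArgName() names the value placeholder and defaults to "arg"
        // in commons-cli 1.2; it is not the option's name.
        System.out.println(top.getArgName()); // arg
        // getLongOpt() is null because no long form was registered.
        System.out.println(top.getLongOpt()); // null
      }
    }

Keying the whitelist on getLongOpt() makes the check meaningful, provided every option is actually built with a long name; the DiskBalancer.java hunk below takes care of that.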
ExecuteCommand.java

@@ -47,7 +47,6 @@ public class ExecuteCommand extends Command {
   public ExecuteCommand(Configuration conf) {
     super(conf);
     addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
-    addValidCommandParameters(DiskBalancer.NODE, "Name of the target node.");
   }

   /**

HelpCommand.java

@@ -37,6 +37,7 @@ public class HelpCommand extends Command {
    */
   public HelpCommand(Configuration conf) {
     super(conf);
+    addValidCommandParameters(DiskBalancer.HELP, "Help Command");
   }

   /**

PlanCommand.java

@@ -74,6 +74,7 @@ public class PlanCommand extends Command {
         "between 2 disks");
     addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
         "verbose mode.");
+    addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
   }

   /**

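With the blanket registrations gone from the base class, each subcommand constructor now declares exactly the options it accepts, and the validation loop in Command.java rejects everything else. A standalone sketch of that whitelist logic (the map and helper mirror the patch; the registered values are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class WhitelistSketch {
      // Mirrors the validArgs map consulted by the loop in Command.java.
      private static final Map<String, String> validArgs = new HashMap<>();

      static void addValidCommandParameters(String name, String desc) {
        validArgs.put(name, desc);
      }

      public static void main(String[] args) {
        // Roughly what PlanCommand now registers for itself.
        addValidCommandParameters("plan", "Plan Command");
        addValidCommandParameters("verbose", "Run plan command in verbose mode.");

        // "report" is not registered for the plan command, so it would be
        // rejected -- the IllegalArgumentException the new tests expect.
        System.out.println(validArgs.containsKey("verbose")); // true
        System.out.println(validArgs.containsKey("report"));  // false
      }
    }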
org/apache/hadoop/hdfs/tools/DiskBalancer.java

@@ -399,13 +399,19 @@ public class DiskBalancer extends Configured implements Tool {
     getReportOptions().addOption(report);
     opt.addOption(report);

-    Option top = new Option(TOP, true,
-        "specify the number of nodes to be listed which has data imbalance.");
+    Option top = OptionBuilder.withLongOpt(TOP)
+        .hasArg()
+        .withDescription("specify the number of nodes to be listed which has" +
+            " data imbalance.")
+        .create();
     getReportOptions().addOption(top);
     opt.addOption(top);

-    Option node = new Option(NODE, true,
-        "Datanode address, it can be DataNodeID, IP or hostname.");
+    Option node = OptionBuilder.withLongOpt(NODE)
+        .hasArg()
+        .withDescription("Datanode address, " +
+            "it can be DataNodeID, IP or hostname.")
+        .create();
     getReportOptions().addOption(node);
     opt.addOption(node);
   }

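The move to OptionBuilder matters because new Option(TOP, true, ...) registers a short name only, so getLongOpt() returns null on the parsed option and the new whitelist check could never match it. A runnable sketch of the difference, assuming the commons-cli 1.x GnuParser that this tool's option parsing is built on:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.OptionBuilder;
    import org.apache.commons.cli.Options;

    public class LongOptDemo {
      public static void main(String[] args) throws Exception {
        Options opts = new Options();
        // Old style: short option only; its getLongOpt() stays null.
        opts.addOption(new Option("top", true, "short form only"));
        // New style, as in this commit: the option carries a long name.
        opts.addOption(OptionBuilder.withLongOpt("node")
            .hasArg()
            .withDescription("has a long form")
            .create());

        CommandLine cmd = new GnuParser()
            .parse(opts, new String[] {"-top", "5", "-node", "dn1"});
        for (Option o : cmd.getOptions()) {
          System.out.printf("opt=%s longOpt=%s%n", o.getOpt(), o.getLongOpt());
        }
        // Prints: opt=top longOpt=null, then opt=null longOpt=node.
      }
    }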
hdfs-default.xml

@@ -4086,4 +4086,44 @@
   Truststore password for HTTPS SSL configuration
   </description>
 </property>
+
+<!--Disk balancer properties-->
+<property>
+  <name>dfs.disk.balancer.max.disk.throughputInMBperSec</name>
+  <value>10</value>
+  <description>Maximum disk bandwidth used by the disk balancer
+  while reading from a source disk. The unit is MB/sec.
+  </description>
+</property>
+
+<property>
+  <name>dfs.disk.balancer.block.tolerance.percent</name>
+  <value>10</value>
+  <description>
+    While a disk balancer copy operation is proceeding, the datanode is
+    still active, so it might not be possible to move exactly the
+    specified amount of data. This tolerance is the percentage of
+    deviation that still counts as a good enough move.
+  </description>
+</property>
+
+<property>
+  <name>dfs.disk.balancer.max.disk.errors</name>
+  <value>5</value>
+  <description>
+    During a block move from a source to a destination disk, we might
+    encounter various errors. This defines how many errors we can tolerate
+    before we declare a move between two disks (a step) to have failed.
+  </description>
+</property>
+
+<property>
+  <name>dfs.disk.balancer.enabled</name>
+  <value>false</value>
+  <description>
+    This enables the disk balancer feature on a cluster. By default, the
+    disk balancer is disabled.
+  </description>
+</property>
 </configuration>

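Note that dfs.disk.balancer.enabled defaults to false, so the feature has to be switched on before any plan can run; the test below does this programmatically. A minimal sketch of that toggle (the config key is taken from the test, the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class EnableDiskBalancer {
      public static void main(String[] args) {
        // HdfsConfiguration picks up hdfs-default.xml and hdfs-site.xml.
        Configuration conf = new HdfsConfiguration();
        // The same switch TestDiskBalancerCommand flips in its setUp().
        conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
        System.out.println(conf.getBoolean(
            DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, false)); // true
      }
    }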
TestDiskBalancerCommand.java

@@ -5,9 +5,9 @@
  * licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- * <p/>
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -44,16 +44,27 @@ import org.junit.Test;
 import com.google.common.collect.Lists;

+import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
+
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
+
 /**
  * Tests various CLI commands of DiskBalancer.
  */
 public class TestDiskBalancerCommand {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
   private MiniDFSCluster cluster;
   private URI clusterJson;
+  private Configuration conf = new HdfsConfiguration();

   @Before
   public void setUp() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
         .storagesPerDatanode(2).build();
@@ -73,7 +84,7 @@ public class TestDiskBalancerCommand {
   }

   /* test basic report */
-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReportSimple() throws Exception {
     final String cmdLine = "hdfs diskbalancer -report";
     final List<String> outputs = runCommand(cmdLine);
@@ -101,7 +112,7 @@ public class TestDiskBalancerCommand {
   }

   /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReportLessThanTotal() throws Exception {
     final String cmdLine = "hdfs diskbalancer -report -top 32";
     final List<String> outputs = runCommand(cmdLine);
@@ -124,7 +135,7 @@ public class TestDiskBalancerCommand {
   }

   /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReportMoreThanTotal() throws Exception {
     final String cmdLine = "hdfs diskbalancer -report -top 128";
     final List<String> outputs = runCommand(cmdLine);
@@ -148,7 +159,7 @@ public class TestDiskBalancerCommand {
   }

   /* test invalid top limit, e.g., -report -top xx */
-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReportInvalidTopLimit() throws Exception {
     final String cmdLine = "hdfs diskbalancer -report -top xx";
     final List<String> outputs = runCommand(cmdLine);
@@ -174,7 +185,7 @@ public class TestDiskBalancerCommand {
             containsString("9 volumes with node data density 1.97"))));
   }

-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReportNode() throws Exception {
     final String cmdLine =
         "hdfs diskbalancer -report -node " +
@@ -249,11 +260,8 @@ public class TestDiskBalancerCommand {
             containsString("0.25 free: 490407853993/2000000000000"))));
   }

-  @Test(timeout=60000)
+  @Test(timeout = 60000)
   public void testReadClusterFromJson() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson,
         conf);
     DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(
@@ -262,10 +270,72 @@ public class TestDiskBalancerCommand {
     assertEquals(64, diskBalancerCluster.getNodes().size());
   }

+  /* test -plan DataNodeID */
+  @Test(timeout = 60000)
+  public void testPlanNode() throws Exception {
+    final String planArg = String.format("-%s %s", PLAN,
+        cluster.getDataNodes().get(0).getDatanodeUuid());
+    final String cmdLine = String.format("hdfs diskbalancer %s", planArg);
+    runCommand(cmdLine);
+  }
+
+  /* Test that illegal arguments are handled correctly. */
+  @Test(timeout = 60000)
+  public void testIllegalArgument() throws Exception {
+    final String planArg = String.format("-%s %s", PLAN,
+        "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+    final String cmdLine = String.format("hdfs diskbalancer %s -report",
+        planArg);
+    // -plan and -report cannot be used together; this exercises the
+    // command line argument validation.
+    thrown.expect(java.lang.IllegalArgumentException.class);
+    runCommand(cmdLine);
+  }
+
+  @Test(timeout = 60000)
+  public void testCancelCommand() throws Exception {
+    final String cancelArg = String.format("-%s %s", CANCEL, "nosuchplan");
+    final String nodeArg = String.format("-%s %s", NODE,
+        cluster.getDataNodes().get(0).getDatanodeUuid());
+    // A Host:Port format is expected for the node, so the cancel
+    // command will throw.
+    thrown.expect(java.lang.IllegalArgumentException.class);
+    final String cmdLine = String.format("hdfs diskbalancer %s %s",
+        cancelArg, nodeArg);
+    runCommand(cmdLine);
+  }
+
+  /* Makes an invalid query attempt against a non-existent datanode. */
+  @Test(timeout = 60000)
+  public void testQueryCommand() throws Exception {
+    final String queryArg = String.format("-%s %s", QUERY,
+        cluster.getDataNodes().get(0).getDatanodeUuid());
+    thrown.expect(java.net.UnknownHostException.class);
+    final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
+    runCommand(cmdLine);
+  }
+
+  @Test(timeout = 60000)
+  public void testHelpCommand() throws Exception {
+    final String helpArg = String.format("-%s", HELP);
+    final String cmdLine = String.format("hdfs diskbalancer %s", helpArg);
+    runCommand(cmdLine);
+  }
+
   private List<String> runCommand(final String cmdLine) throws Exception {
     String[] cmds = StringUtils.split(cmdLine, ' ');
-    Configuration conf = new HdfsConfiguration();
     org.apache.hadoop.hdfs.tools.DiskBalancer db =
         new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);