HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in ReportCommand test. Contributed by Xiaobing Zhou.

Anu Engineer authored 2016-06-21 17:34:58 -07:00, committed by Arpit Agarwal
parent c6ed54808d
commit 7b23ad1ef7
2 changed files with 44 additions and 47 deletions

ReportCommand.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 import java.io.PrintStream;
 import java.util.Collections;
+import java.util.List;
 import java.util.ListIterator;

 import org.apache.commons.cli.CommandLine;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSe
 import org.apache.hadoop.hdfs.tools.DiskBalancer;

 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;

 /**
  * Executes the report command.
@@ -164,9 +166,10 @@ public class ReportCommand extends Command {
           dbdn.getVolumeCount(),
           dbdn.getNodeDataDensity()));

+      List<String> volumeList = Lists.newArrayList();
       for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
         for (DiskBalancerVolume vol : vset.getVolumes()) {
-          result.appendln(String.format(volumeFormat,
+          volumeList.add(String.format(volumeFormat,
               vol.getStorageType(),
               vol.getPath(),
               vol.getUsedRatio(),
@@ -181,6 +184,10 @@
               vol.isTransient() ? trueStr : falseStr));
         }
       }
+
+      Collections.sort(volumeList);
+      result.appendln(
+          StringUtils.join(volumeList.toArray(), System.lineSeparator()));
     }
   }
 }
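The change above makes the per-node volume listing deterministic: rows are buffered into volumeList, sorted, and emitted once, instead of being appended in whatever order getVolumeSets() happens to iterate. A minimal standalone sketch of the same collect-sort-join pattern, using JDK equivalents of the Guava Lists and commons-lang StringUtils helpers in the patch (class name, variable names, and row format here are illustrative, not the actual volumeFormat):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class StableReportSketch {
  public static void main(String[] args) {
    // Volume paths may arrive in any order, e.g. from a HashMap's values().
    List<String> volumes = Arrays.asList("/tmp/disk/b", "/tmp/disk/a");

    // Buffer the formatted rows instead of appending them immediately.
    List<String> rows = new ArrayList<>();               // Lists.newArrayList() in the patch
    for (String vol : volumes) {
      rows.add(String.format("[DISK: volume-%s]", vol)); // illustrative row format
    }

    // Sort, then emit once: output is now stable across iteration orders.
    Collections.sort(rows);
    System.out.println(String.join(System.lineSeparator(), rows)); // StringUtils.join in the patch
  }
}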
@@ -194,13 +201,13 @@
         " datanode, or prints out the list of nodes that will benefit from " +
         "running disk balancer. Top defaults to " + getDefaultTop();
     String footer = ". E.g.:\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report -top 5\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report "
+        + "hdfs diskbalancer -fs http://namenode.uri -report\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report -top 5\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report "
         + "-node {DataNodeID | IP | Hostname}";

     HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp("hdfs diskbalancer -uri http://namenode.uri " +
+    helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
         "-report [options]",
         header, DiskBalancer.getReportOptions(), footer);
   }

TestDiskBalancerCommand.java

@@ -71,8 +71,10 @@ public class TestDiskBalancerCommand {
     }
   }

-  private void testReportSimple() throws Exception {
-    final String cmdLine = String.format("hdfs diskbalancer -uri %s -report",
+  /* test basic report */
+  @Test(timeout=60000)
+  public void testReportSimple() throws Exception {
+    final String cmdLine = String.format("hdfs diskbalancer -fs %s -report",
         clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
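This hunk, and the four that follow, promote private helpers to self-contained @Test(timeout=60000) methods, so each report scenario runs and fails independently instead of through the single testReportCommmand driver removed at the end of this file. A minimal JUnit 4 sketch of the resulting shape (class name and the runReport helper are hypothetical stand-ins for the real TestDiskBalancerCommand#runCommand):

import static org.junit.Assert.assertFalse;

import java.util.Collections;
import java.util.List;

import org.junit.Test;

public class ReportScenariosSketch {
  // Hypothetical stand-in for TestDiskBalancerCommand#runCommand.
  private List<String> runReport(String args) {
    return Collections.singletonList("report for: " + args);
  }

  // Each scenario is its own test: it can run alone, fail alone,
  // and is killed if it hangs past the timeout.
  @Test(timeout = 60000)
  public void testReportSimple() throws Exception {
    assertFalse(runReport("-report").isEmpty());
  }

  @Test(timeout = 60000)
  public void testReportTopLimit() throws Exception {
    assertFalse(runReport("-report -top 32").isEmpty());
  }
}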
@@ -98,9 +100,11 @@
   }

-  private void testReportLessThanTotal() throws Exception {
+  /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
+  @Test(timeout=60000)
+  public void testReportLessThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 32", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 32", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);

     assertThat(
@@ -120,9 +124,11 @@
             containsString("9 volumes with node data density 1.97"))));
   }

-  private void testReportMoreThanTotal() throws Exception {
+  /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
+  @Test(timeout=60000)
+  public void testReportMoreThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 128", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 128", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);

     assertThat(
@@ -143,9 +149,11 @@
   }

-  private void testReportInvalidTopLimit() throws Exception {
+  /* test invalid top limit, e.g., -report -top xx */
+  @Test(timeout=60000)
+  public void testReportInvalidTopLimit() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top xx", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top xx", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);

     assertThat(
@@ -169,10 +177,12 @@
             containsString("9 volumes with node data density 1.97"))));
   }

-  private void testReportNode() throws Exception {
+  /* test -report -node DataNodeID */
+  @Test(timeout=60000)
+  public void testReportNode() throws Exception {
     final String cmdLine = String
         .format(
-            "hdfs diskbalancer -uri %s -report -node "
+            "hdfs diskbalancer -fs %s -report -node "
                 + "a87654a9-54c7-4693-8dd9-c9c7021dc340",
             clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
@@ -192,9 +202,9 @@
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/tmp/disk/xx3j3ph3zd"),
-            containsString("0.72 used: 289544224916/400000000000"),
-            containsString("0.28 free: 110455775084/400000000000"))));
+            containsString("/tmp/disk/KmHefYNURo"),
+            containsString("0.20 used: 39160240782/200000000000"),
+            containsString("0.80 free: 160839759218/200000000000"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
@@ -204,16 +214,15 @@
     assertThat(
         outputs.get(5),
         is(allOf(containsString("DISK"),
-            containsString("DISK"),
-            containsString("/tmp/disk/KmHefYNURo"),
-            containsString("0.20 used: 39160240782/200000000000"),
-            containsString("0.80 free: 160839759218/200000000000"))));
+            containsString("/tmp/disk/xx3j3ph3zd"),
+            containsString("0.72 used: 289544224916/400000000000"),
+            containsString("0.28 free: 110455775084/400000000000"))));
     assertThat(
         outputs.get(6),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/MXRyYsCz3U"),
-            containsString("0.55 used: 438102096853/800000000000"),
-            containsString("0.45 free: 361897903147/800000000000"))));
+            containsString("/tmp/disk/BoBlQFxhfw"),
+            containsString("0.60 used: 477590453390/800000000000"),
+            containsString("0.40 free: 322409546610/800000000000"))));
     assertThat(
         outputs.get(7),
         is(allOf(containsString("RAM_DISK"),
@@ -223,9 +232,9 @@
     assertThat(
         outputs.get(8),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/BoBlQFxhfw"),
-            containsString("0.60 used: 477590453390/800000000000"),
-            containsString("0.40 free: 322409546610/800000000000"))));
+            containsString("/tmp/disk/MXRyYsCz3U"),
+            containsString("0.55 used: 438102096853/800000000000"),
+            containsString("0.45 free: 361897903147/800000000000"))));
     assertThat(
         outputs.get(9),
         is(allOf(containsString("SSD"),
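In the three assertion hunks above the expected values are unchanged; only their positions move, because the sorted report now lists volumes in lexicographic order of the formatted row, which appears to start with the storage type, so rows group by type and then by path. A small sketch of why the expectations reordered (row text abbreviated from the assertions above; the real volumeFormat may differ):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ExpectedOrderSketch {
  public static void main(String[] args) {
    // Abbreviated report rows in an arbitrary (pre-sort) order.
    List<String> rows = Arrays.asList(
        "[DISK: /tmp/disk/xx3j3ph3zd]",
        "[DISK: /tmp/disk/KmHefYNURo]",
        "[RAM_DISK: /tmp/disk/MXRyYsCz3U]",
        "[RAM_DISK: /tmp/disk/BoBlQFxhfw]");

    Collections.sort(rows);

    // Prints DISK rows before RAM_DISK rows; within a type,
    // KmHefYNURo precedes xx3j3ph3zd and BoBlQFxhfw precedes MXRyYsCz3U,
    // matching the reordered outputs.get(...) expectations above.
    rows.forEach(System.out::println);
  }
}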
@@ -247,25 +256,6 @@
   }

   @Test(timeout=60000)
-  public void testReportCommmand() throws Exception {
-    /* test basic report */
-    testReportSimple();
-
-    /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
-    testReportLessThanTotal();
-
-    /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
-    testReportMoreThanTotal();
-
-    /* test invalid top limit, e.g., -report -top xx */
-    testReportInvalidTopLimit();
-
-    /* test -report -node DataNodeID */
-    testReportNode();
-  }
-
-  @Test
   public void testReadClusterFromJson() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);