HDFS-10821. DiskBalancer: Report command support with multiple nodes. Contributed by Yiqun Lin.
parent 58ed4fa544
commit 8a93f45a80
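This change extends the disk balancer report command so that the -node option accepts a comma-separated list of DataNodes, each identified by UUID, IP address, or hostname. Previously the command handled a single node and printed "Can't find a DataNode that matches '%s'." for an unknown name; unknown names now raise a DiskBalancerException carrying the new INVALID_NODE result. For illustration, a multi-node report would be invoked like this (the hostnames are placeholders):

`hdfs diskbalancer -fs http://namenode.uri -report -node datanode1.example.com,datanode2.example.com`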
DiskBalancerException.java:

@@ -38,7 +38,8 @@ public class DiskBalancerException extends IOException {
     INVALID_MOVE,
     INTERNAL_ERROR,
     NO_SUCH_PLAN,
-    UNKNOWN_KEY
+    UNKNOWN_KEY,
+    INVALID_NODE,
   }
 
   private final Result result;
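A minimal sketch of how a caller can react to the new failure mode. This is a fragment, not committed code: it assumes DiskBalancerException exposes a getResult() accessor for the result field shown above, and command/nodeArg stand in for a concrete Command instance and its -node argument:

  // Sketch only: distinguishing the new INVALID_NODE result.
  try {
    List<DiskBalancerDataNode> nodes = command.getNodes(nodeArg);
    // ... report on the resolved nodes ...
  } catch (DiskBalancerException e) {
    if (e.getResult() == DiskBalancerException.Result.INVALID_NODE) {
      // One or more of the requested nodes is not in the cluster.
      System.err.println(e.getMessage());
    }
  }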
Command.java:

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

@@ -256,6 +257,7 @@ public abstract class Command extends Configured {
       throws IOException {
     Set<String> nodeNames = null;
     List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
+    List<String> invalidNodeList = Lists.newArrayList();
 
     if ((listArg == null) || listArg.isEmpty()) {
       return nodeList;

@@ -269,10 +271,22 @@ public abstract class Command extends Configured {
 
         if (node != null) {
           nodeList.add(node);
+        } else {
+          invalidNodeList.add(name);
         }
       }
     }
 
+    if (!invalidNodeList.isEmpty()) {
+      String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
+      String warnMsg = String.format(
+          "The node(s) '%s' not found. "
+              + "Please make sure that '%s' exists in the cluster.",
+          invalidNodes, invalidNodes);
+      throw new DiskBalancerException(warnMsg,
+          DiskBalancerException.Result.INVALID_NODE);
+    }
+
     return nodeList;
   }
 
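To make the aggregation concrete, here is a standalone sketch of the message the new branch assembles, using the same commons-lang and Guava utilities the patch itself imports ("nodeA" and "nodeB" are made-up names):

  import java.util.List;
  import com.google.common.collect.Lists;
  import org.apache.commons.lang.StringUtils;

  public class InvalidNodeMessageDemo {
    public static void main(String[] args) {
      // Two node names that failed to resolve against the cluster.
      List<String> invalidNodeList = Lists.newArrayList("nodeA", "nodeB");
      // Join them into one comma-separated token, as getNodes() does.
      String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
      String warnMsg = String.format(
          "The node(s) '%s' not found. "
              + "Please make sure that '%s' exists in the cluster.",
          invalidNodes, invalidNodes);
      System.out.println(warnMsg);
      // Prints: The node(s) 'nodeA,nodeB' not found. Please make sure that
      // 'nodeA,nodeB' exists in the cluster.
    }
  }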
ReportCommand.java:

@@ -27,6 +27,7 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.text.StrBuilder;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;

@@ -59,7 +60,7 @@ public class ReportCommand extends Command {
         "Top number of nodes to be processed. Default: %d", getDefaultTop());
     addValidCommandParameters(DiskBalancerCLI.TOP, desc);
 
-    desc = String.format("Print out volume information for a DataNode.");
+    desc = String.format("Print out volume information for DataNode(s).");
     addValidCommandParameters(DiskBalancerCLI.NODE, desc);
   }
 

@@ -83,7 +84,7 @@ public class ReportCommand extends Command {
 
     if (cmd.hasOption(DiskBalancerCLI.NODE)) {
       /*
-       * Reporting volume information for a specific DataNode
+       * Reporting volume information for specific DataNode(s)
        */
       handleNodeReport(cmd, result, nodeFormatWithoutSequence, volumeFormat);
 

@@ -133,8 +134,8 @@ public class ReportCommand extends Command {
       final String nodeFormat, final String volumeFormat) throws Exception {
     String outputLine = "";
     /*
-     * get value that identifies a DataNode from command line, it could be UUID,
-     * IP address or host name.
+     * get value that identifies DataNode(s) from command line, it could be
+     * UUID, IP address or host name.
      */
     final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
 

@@ -143,70 +144,86 @@ public class ReportCommand extends Command {
       recordOutput(result, outputLine);
     } else {
       /*
-       * Reporting volume information for a specific DataNode
+       * Reporting volume information for specific DataNode(s)
        */
       outputLine = String.format(
-          "Reporting volume information for DataNode '%s'.", nodeVal);
+          "Reporting volume information for DataNode(s) '%s'.", nodeVal);
       recordOutput(result, outputLine);
 
-      final String trueStr = "True";
-      final String falseStr = "False";
-      DiskBalancerDataNode dbdn = getNode(nodeVal);
-      // get storage path of datanode
-      populatePathNames(dbdn);
+      List<DiskBalancerDataNode> dbdns = Lists.newArrayList();
+      try {
+        dbdns = getNodes(nodeVal);
+      } catch (DiskBalancerException e) {
+        // If nodeVal contains any invalid node, the exception is thrown
+        // here; record its message and stop.
+        recordOutput(result, e.getMessage());
+        return;
+      }
 
-      if (dbdn == null) {
-        outputLine = String.format(
-            "Can't find a DataNode that matches '%s'.", nodeVal);
-        recordOutput(result, outputLine);
-      } else {
-        result.appendln(String.format(nodeFormat,
-            dbdn.getDataNodeName(),
-            dbdn.getDataNodeIP(),
-            dbdn.getDataNodePort(),
-            dbdn.getDataNodeUUID(),
-            dbdn.getVolumeCount(),
-            dbdn.getNodeDataDensity()));
-
-        List<String> volumeList = Lists.newArrayList();
-        for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
-          for (DiskBalancerVolume vol : vset.getVolumes()) {
-            volumeList.add(String.format(volumeFormat,
-                vol.getStorageType(),
-                vol.getPath(),
-                vol.getUsedRatio(),
-                vol.getUsed(),
-                vol.getCapacity(),
-                vol.getFreeRatio(),
-                vol.getFreeSpace(),
-                vol.getCapacity(),
-                vol.isFailed() ? trueStr : falseStr,
-                vol.isReadOnly() ? trueStr : falseStr,
-                vol.isSkip() ? trueStr : falseStr,
-                vol.isTransient() ? trueStr : falseStr));
-          }
-        }
-
-        Collections.sort(volumeList);
-        result.appendln(
-            StringUtils.join(volumeList.toArray(), System.lineSeparator()));
+      if (!dbdns.isEmpty()) {
+        for (DiskBalancerDataNode node : dbdns) {
+          recordNodeReport(result, node, nodeFormat, volumeFormat);
+          result.append(System.lineSeparator());
+        }
       }
     }
   }
 
+  /**
+   * Put node report lines to string buffer.
+   */
+  private void recordNodeReport(StrBuilder result, DiskBalancerDataNode dbdn,
+      final String nodeFormat, final String volumeFormat) throws Exception {
+    final String trueStr = "True";
+    final String falseStr = "False";
+
+    // get storage path of datanode
+    populatePathNames(dbdn);
+    result.appendln(String.format(nodeFormat,
+        dbdn.getDataNodeName(),
+        dbdn.getDataNodeIP(),
+        dbdn.getDataNodePort(),
+        dbdn.getDataNodeUUID(),
+        dbdn.getVolumeCount(),
+        dbdn.getNodeDataDensity()));
+
+    List<String> volumeList = Lists.newArrayList();
+    for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
+      for (DiskBalancerVolume vol : vset.getVolumes()) {
+        volumeList.add(String.format(volumeFormat,
+            vol.getStorageType(),
+            vol.getPath(),
+            vol.getUsedRatio(),
+            vol.getUsed(),
+            vol.getCapacity(),
+            vol.getFreeRatio(),
+            vol.getFreeSpace(),
+            vol.getCapacity(),
+            vol.isFailed() ? trueStr : falseStr,
+            vol.isReadOnly() ? trueStr : falseStr,
+            vol.isSkip() ? trueStr : falseStr,
+            vol.isTransient() ? trueStr : falseStr));
+      }
+    }
+
+    Collections.sort(volumeList);
+    result.appendln(
+        StringUtils.join(volumeList.toArray(), System.lineSeparator()));
+  }
+
   /**
    * Prints the help message.
    */
   @Override
   public void printHelp() {
-    String header = "Report command reports the volume information of a given" +
-        " datanode, or prints out the list of nodes that will benefit from " +
-        "running disk balancer. Top defaults to " + getDefaultTop();
+    String header = "Report command reports the volume information of given" +
+        " datanode(s), or prints out the list of nodes that will benefit " +
+        "from running disk balancer. Top defaults to " + getDefaultTop();
     String footer = ". E.g.:\n"
         + "hdfs diskbalancer -report\n"
         + "hdfs diskbalancer -report -top 5\n"
         + "hdfs diskbalancer -report "
-        + "-node {DataNodeID | IP | Hostname}";
+        + "-node [<DataNodeID|IP|Hostname>,...]";
 
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
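A note on the refactoring: the per-node formatting that used to live inline in handleNodeReport moves wholesale into the new recordNodeReport helper. handleNodeReport now only parses the node list, surfaces the INVALID_NODE message when resolution fails, and loops over the resolved nodes, appending one blank-line-separated report block per DataNode.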
HDFS DiskBalancer documentation (markdown):

@@ -102,9 +102,9 @@ or
 Plan ID can be read from datanode using query command.
 
 ### Report
-Report command provides detailed report about a node.
+Report command provides detailed report about node(s).
 
-`hdfs diskbalancer -fs http://namenode.uri -report -node {DataNodeID | IP | Hostname}`
+`hdfs diskbalancer -fs http://namenode.uri -report -node [<DataNodeID|IP|Hostname>,...]`
 
 
 Settings
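Since each element of the list is resolved independently, a single invocation can name nodes by different identifier types; for example (both identifiers are placeholders):

`hdfs diskbalancer -fs http://namenode.uri -report -node 10.0.0.17,datanode2.example.com`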
TestDiskBalancerCommand.java:

@@ -23,6 +23,7 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;

@@ -457,4 +458,52 @@ public class TestDiskBalancerCommand {
     List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
     assertEquals(nodeNum, nodeList.size());
   }
+
+  @Test(timeout = 60000)
+  public void testReportCommandWithMultipleNodes() throws Exception {
+    String dataNodeUuid1 = cluster.getDataNodes().get(0).getDatanodeUuid();
+    String dataNodeUuid2 = cluster.getDataNodes().get(1).getDatanodeUuid();
+    final String planArg = String.format("-%s -%s %s,%s",
+        REPORT, NODE, dataNodeUuid1, dataNodeUuid2);
+    final String cmdLine = String.format("hdfs diskbalancer %s", planArg);
+    List<String> outputs = runCommand(cmdLine, cluster);
+
+    assertThat(
+        outputs.get(0),
+        containsString("Processing report command"));
+    assertThat(
+        outputs.get(1),
+        is(allOf(containsString("Reporting volume information for DataNode"),
+            containsString(dataNodeUuid1), containsString(dataNodeUuid2))));
+    // Since parsing the node string may disrupt the order of the input
+    // nodes, compare each UUID against both output lines.
+    assertTrue(outputs.get(2).contains(dataNodeUuid1)
+        || outputs.get(6).contains(dataNodeUuid1));
+    assertTrue(outputs.get(2).contains(dataNodeUuid2)
+        || outputs.get(6).contains(dataNodeUuid2));
+  }
+
+  @Test(timeout = 60000)
+  public void testReportCommandWithInvalidNode() throws Exception {
+    String dataNodeUuid1 = cluster.getDataNodes().get(0).getDatanodeUuid();
+    String invalidNode = "invalidNode";
+    final String planArg = String.format("-%s -%s %s,%s",
+        REPORT, NODE, dataNodeUuid1, invalidNode);
+    final String cmdLine = String.format("hdfs diskbalancer %s", planArg);
+    List<String> outputs = runCommand(cmdLine, cluster);
+
+    assertThat(
+        outputs.get(0),
+        containsString("Processing report command"));
+    assertThat(
+        outputs.get(1),
+        is(allOf(containsString("Reporting volume information for DataNode"),
+            containsString(dataNodeUuid1), containsString(invalidNode))));
+
+    String invalidNodeInfo =
+        String.format("The node(s) '%s' not found. "
+            + "Please make sure that '%s' exists in the cluster.",
+            invalidNode, invalidNode);
+    assertTrue(outputs.get(2).contains(invalidNodeInfo));
+  }
 }