HDFS-10500. Diskbalancer: Print out information when a plan is not generated. Contributed by Anu Engineer.

Anu Engineer 2016-06-09 13:43:19 -07:00 committed by Arpit Agarwal
parent d2ff793ae4
commit 78a1032b71
4 changed files with 37 additions and 49 deletions


@@ -70,7 +70,7 @@ public void execute(CommandLine cmd) throws Exception {
     // points us to the plan file, we can compute the hash as well as read
     // the address of the datanode from the plan file.
     String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
-    Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
         "Invalid plan file specified.");
     String planData = null;
     try (FSDataInputStream plan = open(planFile)) {
@@ -88,7 +88,7 @@ public void execute(CommandLine cmd) throws Exception {
    */
   private void cancelPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
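
Note on the first hunk above: Guava's Preconditions.checkArgument throws IllegalArgumentException when the supplied expression is false, so the old condition (planFile == null || planFile.isEmpty()) rejected every real plan file and let a missing one through; the patch inverts it so only a non-empty path passes. Below is a minimal standalone sketch of the corrected guard; the CheckArgumentDemo class, the validatePlanFile helper, and the sample path are illustrative, not part of the patch.

    import com.google.common.base.Preconditions;

    public class CheckArgumentDemo {
      // Mirrors the corrected precondition in CancelCommand: the check passes
      // only when a non-empty plan file path was supplied.
      static void validatePlanFile(String planFile) {
        Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
            "Invalid plan file specified.");
      }

      public static void main(String[] args) {
        validatePlanFile("/tmp/datanode.plan.json"); // passes
        validatePlanFile("");                        // throws IllegalArgumentException
      }
    }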


@@ -31,16 +31,13 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -59,10 +56,10 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.List;
 
 /**
  * Common interface for command handling.
@@ -394,16 +391,4 @@ protected void addValidCommandParameters(String key, String desc) {
   protected DiskBalancerCluster getCluster() {
     return cluster;
   }
-
-  /**
-   * Returns a plan from the Json Data.
-   *
-   * @param planData - Json String
-   * @return NodePlan
-   * @throws IOException
-   */
-  protected NodePlan readPlan(String planData) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(planData, NodePlan.class);
-  }
 }


@@ -79,7 +79,7 @@ public void execute(CommandLine cmd) throws Exception {
    */
   private void submitPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
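
Both CancelCommand and ExecuteCommand now call NodePlan.parseJson(planData) instead of the readPlan helper removed from the command base class above, and PlanCommand below writes plan.toJson(), so JSON handling is owned by the plan class itself. The NodePlan implementation is not part of this diff; the following is only a sketch of how such parse/serialize helpers are commonly written with the Jackson ObjectMapper that the old helpers used, not the actual Hadoop class (NodePlanSketch and its reduced field set are illustrative).

    import java.io.IOException;
    import org.codehaus.jackson.map.ObjectMapper;

    // Stand-in for the real NodePlan, reduced to the fields the commands above use.
    public class NodePlanSketch {
      private static final ObjectMapper MAPPER = new ObjectMapper();

      private String nodeName;
      private int port;

      public String getNodeName() { return nodeName; }
      public void setNodeName(String nodeName) { this.nodeName = nodeName; }
      public int getPort() { return port; }
      public void setPort(int port) { this.port = port; }

      /** Builds a plan object from its JSON representation. */
      public static NodePlanSketch parseJson(String json) throws IOException {
        return MAPPER.readValue(json, NodePlanSketch.class);
      }

      /** Serializes this plan back to JSON. */
      public String toJson() throws IOException {
        return MAPPER.writeValueAsString(this);
      }
    }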


@@ -25,13 +25,12 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
-    .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
@@ -122,11 +121,14 @@ public void execute(CommandLine cmd) throws Exception {
     setNodesToProcess(node);
     populatePathNames(node);
 
+    NodePlan plan = null;
     List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
     setPlanParams(plans);
-    LOG.info("Writing plan to : {}", getOutputPath());
-    System.out.printf("Writing plan to : %s%n", getOutputPath());
+    if (plans.size() > 0) {
+      plan = plans.get(0);
+    }
 
     try (FSDataOutputStream beforeStream = create(String.format(
         DiskBalancer.BEFORE_TEMPLATE,
@@ -135,18 +137,24 @@ public void execute(CommandLine cmd) throws Exception {
           .getBytes(StandardCharsets.UTF_8));
     }
 
-    try (FSDataOutputStream planStream = create(String.format(
-        DiskBalancer.PLAN_TEMPLATE,
-        cmd.getOptionValue(DiskBalancer.PLAN)))) {
-      planStream.write(getPlan(plans).getBytes(StandardCharsets.UTF_8));
+    if (plan != null) {
+      LOG.info("Writing plan to : {}", getOutputPath());
+      try (FSDataOutputStream planStream = create(String.format(
+          DiskBalancer.PLAN_TEMPLATE,
+          cmd.getOptionValue(DiskBalancer.PLAN)))) {
+        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
+      }
+    } else {
+      LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
+          "threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
+          this.thresholdPercentage);
     }
 
-    if (cmd.hasOption(DiskBalancer.VERBOSE)) {
+    if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
       printToScreen(plans);
     }
   }
 
   /**
    * Reads the Physical path of the disks we are balancing. This is needed to
    * make the disk balancer human friendly and not used in balancing.
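
The two hunks above are the core of HDFS-10500: computePlan(threshold) can return an empty list when every volume on the node is already within the threshold, and in that case the command now logs "No plan generated..." instead of writing an empty plan file, and skips the verbose report as well. Below is a compact restatement of that guard; the names used here (PlanGuardSketch, NodePlanStub, firstPlanOrNull) are hypothetical and not in the patch.

    import java.util.Collections;
    import java.util.List;

    public class PlanGuardSketch {
      static class NodePlanStub { }

      static NodePlanStub firstPlanOrNull(List<NodePlanStub> plans) {
        // PlanCommand invokes computePlan() for a single datanode, so the list
        // holds at most one entry; an empty list means no balancing is needed
        // and no plan file should be written.
        return plans.isEmpty() ? null : plans.get(0);
      }

      public static void main(String[] args) {
        // Prints "null": nothing to write, only the informational log message.
        System.out.println(firstPlanOrNull(Collections.<NodePlanStub>emptyList()));
      }
    }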
@@ -210,14 +218,21 @@ private double getThresholdPercentage(CommandLine cmd) {
   static private void printToScreen(List<NodePlan> plans) {
     System.out.println("\nPlan :\n");
     System.out.println(StringUtils.repeat("=", 80));
-    System.out.println("Source Disk\t\t Dest.Disk\t\t Move Size\t Type\n ");
+    System.out.println(
+        StringUtils.center("Source Disk", 30) +
+            StringUtils.center("Dest.Disk", 30) +
+            StringUtils.center("Size", 10) +
+            StringUtils.center("Type", 10));
     for (NodePlan plan : plans) {
       for (Step step : plan.getVolumeSetPlans()) {
-        System.out.println(String.format("%s\t%s\t%s\t%s",
-            step.getSourceVolume().getPath(),
-            step.getDestinationVolume().getPath(),
-            step.getSizeString(step.getBytesToMove()),
-            step.getDestinationVolume().getStorageType()));
+        System.out.println(String.format("%s %s %s %s",
+            StringUtils.center(step.getSourceVolume().getPath(), 30),
+            StringUtils.center(step.getDestinationVolume().getPath(), 30),
+            StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
+            StringUtils.center(step.getDestinationVolume().getStorageType(),
+                10)));
       }
     }
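
This hunk replaces the tab-separated table with fixed-width columns so entries line up regardless of path length. StringUtils.center(str, width) pads the string with spaces on both sides up to the given width and returns it unchanged if it is already longer. A small standalone demo follows; the hunk does not show which StringUtils is imported, so the example assumes commons-lang3 on the classpath (center has the same signature in commons-lang 2), and the volume paths, size, and storage type are made-up sample values.

    import org.apache.commons.lang3.StringUtils;

    public class CenterDemo {
      public static void main(String[] args) {
        // Header and rows are padded to the same column widths used in the patch.
        System.out.println(
            StringUtils.center("Source Disk", 30) +
                StringUtils.center("Dest.Disk", 30) +
                StringUtils.center("Size", 10) +
                StringUtils.center("Type", 10));
        System.out.println(String.format("%s %s %s %s",
            StringUtils.center("/data/disk1", 30),
            StringUtils.center("/data/disk2", 30),
            StringUtils.center("10 GB", 10),
            StringUtils.center("DISK", 10)));
      }
    }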
@@ -243,16 +258,4 @@ private void setPlanParams(List<NodePlan> plans) {
       }
     }
   }
-
-  /**
-   * Returns a Json represenation of the plans.
-   *
-   * @param plan - List of plans.
-   * @return String.
-   * @throws IOException
-   */
-  private String getPlan(List<NodePlan> plan) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(plan);
-  }
 }