diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index f39580281d6..3834d9b0cc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -70,7 +70,7 @@ public class CancelCommand extends Command {
     // points us to the plan file, we can compute the hash as well as read
     // the address of the datanode from the plan file.
     String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
-    Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
         "Invalid plan file specified.");
     String planData = null;
     try (FSDataInputStream plan = open(planFile)) {
@@ -88,7 +88,7 @@ public class CancelCommand extends Command {
    */
   private void cancelPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index fb975a88a91..94a21d1325e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -31,16 +31,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -59,10 +56,10 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.List;
 
 /**
  * Common interface for command handling.
@@ -394,16 +391,4 @@ public abstract class Command extends Configured {
   protected DiskBalancerCluster getCluster() {
     return cluster;
   }
-
-  /**
-   * Returns a plan from the Json Data.
-   *
-   * @param planData - Json String
-   * @return NodePlan
-   * @throws IOException
-   */
-  protected NodePlan readPlan(String planData) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(planData, NodePlan.class);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index 6d30e86cdbd..85f2a8628ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -79,7 +79,7 @@ public class ExecuteCommand extends Command {
    */
   private void submitPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 7cf0df1a4d5..91ab7fbfb85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -25,13 +25,12 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
-    .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
@@ -122,11 +121,14 @@ public class PlanCommand extends Command {
     setNodesToProcess(node);
     populatePathNames(node);
 
+    NodePlan plan = null;
     List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
     setPlanParams(plans);
-    LOG.info("Writing plan to : {}", getOutputPath());
-    System.out.printf("Writing plan to : %s%n", getOutputPath());
+    if (plans.size() > 0) {
+      plan = plans.get(0);
+    }
 
     try (FSDataOutputStream beforeStream = create(String.format(
         DiskBalancer.BEFORE_TEMPLATE,
@@ -135,18 +137,24 @@
           .getBytes(StandardCharsets.UTF_8));
     }
 
-    try (FSDataOutputStream planStream = create(String.format(
-        DiskBalancer.PLAN_TEMPLATE,
-        cmd.getOptionValue(DiskBalancer.PLAN)))) {
-      planStream.write(getPlan(plans).getBytes(StandardCharsets.UTF_8));
+    if (plan != null) {
+      LOG.info("Writing plan to : {}", getOutputPath());
+      try (FSDataOutputStream planStream = create(String.format(
+          DiskBalancer.PLAN_TEMPLATE,
+          cmd.getOptionValue(DiskBalancer.PLAN)))) {
+        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
+      }
+    } else {
+      LOG.info("No plan generated. DiskBalancing not needed for node: {} "
+          + "threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
+          this.thresholdPercentage);
     }
 
-    if (cmd.hasOption(DiskBalancer.VERBOSE)) {
+    if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
       printToScreen(plans);
     }
   }
 
-
   /**
    * Reads the Physical path of the disks we are balancing. This is needed to
    * make the disk balancer human friendly and not used in balancing.
@@ -210,14 +218,21 @@
   static private void printToScreen(List<NodePlan> plans) {
     System.out.println("\nPlan :\n");
     System.out.println(StringUtils.repeat("=", 80));
-    System.out.println("Source Disk\t\t Dest.Disk\t\t Move Size\t Type\n ");
+
+    System.out.println(
+        StringUtils.center("Source Disk", 30) +
+        StringUtils.center("Dest.Disk", 30) +
+        StringUtils.center("Size", 10) +
+        StringUtils.center("Type", 10));
+
     for (NodePlan plan : plans) {
       for (Step step : plan.getVolumeSetPlans()) {
-        System.out.println(String.format("%s\t%s\t%s\t%s",
-            step.getSourceVolume().getPath(),
-            step.getDestinationVolume().getPath(),
-            step.getSizeString(step.getBytesToMove()),
-            step.getDestinationVolume().getStorageType()));
+        System.out.println(String.format("%s %s %s %s",
+            StringUtils.center(step.getSourceVolume().getPath(), 30),
+            StringUtils.center(step.getDestinationVolume().getPath(), 30),
+            StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
+            StringUtils.center(step.getDestinationVolume().getStorageType(),
+                10)));
       }
     }
@@ -243,16 +258,4 @@ public class PlanCommand extends Command {
       }
     }
   }
-
-  /**
-   * Returns a Json represenation of the plans.
-   *
-   * @param plan - List of plans.
-   * @return String.
-   * @throws IOException
-   */
-  private String getPlan(List<NodePlan> plan) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(plan);
-  }
 }