From 64ccb232ccf204991a28fa0211917fa935ad30c5 Mon Sep 17 00:00:00 2001
From: Anu Engineer
Date: Tue, 7 Jun 2016 10:29:35 -0700
Subject: [PATCH] HDFS-10478. DiskBalancer: resolve volume path names.
 Contributed by Anu Engineer.

---
 .../diskbalancer/command/PlanCommand.java      | 37 +++++++++++++++
 .../connectors/DBNameNodeConnector.java        |  1 -
 .../hdfs/server/diskbalancer/TestPlanner.java  | 47 +++++++++++++++++++
 3 files changed, 84 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index d346c84963e..7cf0df1a4d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -23,6 +23,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -32,7 +36,9 @@
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * Class that implements Plan Command.
@@ -111,7 +117,10 @@ public void execute(CommandLine cmd) throws Exception {
           cmd.getOptionValue(DiskBalancer.PLAN));
     }
     this.thresholdPercentage = getThresholdPercentage(cmd);
+
+    LOG.debug("Threshold percentage is {}", this.thresholdPercentage);
     setNodesToProcess(node);
+    populatePathNames(node);
 
     List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
     setPlanParams(plans);
@@ -137,6 +146,32 @@ public void execute(CommandLine cmd) throws Exception {
     }
   }
 
+
+  /**
+   * Reads the physical path of the disks we are balancing. This is needed
+   * to make the disk balancer human friendly; it is not used in balancing.
+   *
+   * @param node - Disk Balancer Node.
+   */
+  private void populatePathNames(DiskBalancerDataNode node) throws IOException {
+    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
+    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
+    String volumeNameJson = dnClient.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+    ObjectMapper mapper = new ObjectMapper();
+
+    @SuppressWarnings("unchecked")
+    Map<String, String> volumeMap =
+        mapper.readValue(volumeNameJson, HashMap.class);
+    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
+      for (DiskBalancerVolume vol : set.getVolumes()) {
+        if (volumeMap.containsKey(vol.getUuid())) {
+          vol.setPath(volumeMap.get(vol.getUuid()));
+        }
+      }
+    }
+  }
+
   /**
    * Gets extended help for this command.
    *
@@ -198,9 +233,11 @@ private void setPlanParams(List<NodePlan> plans) {
     for (NodePlan plan : plans) {
       for (Step step : plan.getVolumeSetPlans()) {
         if (this.bandwidth > 0) {
+          LOG.debug("Setting bandwidth to {}", this.bandwidth);
           step.setBandwidth(this.bandwidth);
         }
         if (this.maxError > 0) {
+          LOG.debug("Setting max error to {}", this.maxError);
           step.setMaxDiskErrors(this.maxError);
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index acf1fa121a8..b044baf4781 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -155,7 +155,6 @@ private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
           .READ_ONLY_SHARED) || report.isFailed());
       volume.setStorageType(storage.getStorageType().name());
       volume.setIsTransient(storage.getStorageType().isTransient());
-      //volume.setPath(storage.getVolumePath());
       node.addVolume(volume);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
index 77c2aa3c30f..d01e8ad265d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
@@ -456,4 +456,51 @@ public void testNodePlanSerialize() throws Exception {
     assertEquals(newPlan.getVolumeSetPlans().size(), copy
         .getVolumeSetPlans().size());
   }
+
+  @Test
+  public void testGreedyPlannerLargeDisksWithData() throws Exception {
+    NullConnector nullConnector = new NullConnector();
+    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
+
+    DiskBalancerDataNode node =
+        new DiskBalancerDataNode(UUID.randomUUID().toString());
+
+    // Disks of two different capacities, holding different amounts of data
+    DiskBalancerVolume volume1 = createVolume("volume1", 1968, 88);
+    DiskBalancerVolume volume2 = createVolume("volume2", 1968, 88);
+    DiskBalancerVolume volume3 = createVolume("volume3", 1968, 111);
+    DiskBalancerVolume volume4 = createVolume("volume4", 1968, 111);
+    DiskBalancerVolume volume5 = createVolume("volume5", 1968, 30);
+    DiskBalancerVolume volume6 = createVolume("volume6", 1563, 30);
+    DiskBalancerVolume volume7 = createVolume("volume7", 1563, 30);
+    DiskBalancerVolume volume8 = createVolume("volume8", 1563, 30);
+    DiskBalancerVolume volume9 = createVolume("volume9", 1563, 210);
+
+
+
+
+    node.addVolume(volume1);
+    node.addVolume(volume2);
+    node.addVolume(volume3);
+
+    node.addVolume(volume4);
+    node.addVolume(volume5);
+    node.addVolume(volume6);
+
+    node.addVolume(volume7);
+    node.addVolume(volume8);
+    node.addVolume(volume9);
+
+
+    nullConnector.addNode(node);
+    cluster.readClusterInfo();
+    Assert.assertEquals(1, cluster.getNodes().size());
+
+    GreedyPlanner planner = new GreedyPlanner(1.0f, node);
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
+    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
+
+    assertTrue(plan.getVolumeSetPlans().size() > 2);
+  }
 }
\ No newline at end of file
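Note (not part of the patch): the sketch below illustrates the payload that
populatePathNames() consumes. Per the hunk above, the DataNode's
getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME) call
returns a JSON map from volume UUID to physical path; the UUIDs and mount
paths here are invented, and Jackson's com.fasterxml.jackson ObjectMapper is
assumed as a stand-in for whichever mapper PlanCommand has in scope.

// Illustrative sketch only -- not part of HDFS-10478. Sample UUIDs and
// paths are made up for demonstration.
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.HashMap;
import java.util.Map;

public class VolumePathExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical DataNode reply: volume UUID -> physical path.
    String volumeNameJson = "{"
        + "\"a3155c0b-58e1-4b30-b300-0000000a0001\":\"/data/disk1\","
        + "\"a3155c0b-58e1-4b30-b300-0000000a0002\":\"/data/disk2\"}";

    ObjectMapper mapper = new ObjectMapper();
    @SuppressWarnings("unchecked")
    Map<String, String> volumeMap =
        mapper.readValue(volumeNameJson, HashMap.class);

    // PlanCommand performs the same UUID lookup for every volume so that
    // the emitted plan names real directories instead of bare UUIDs.
    volumeMap.forEach((uuid, path) ->
        System.out.println(uuid + " -> " + path));
  }
}

The join stays client-side: the wire protocol carries only a string-typed
setting, and PlanCommand decorates its data model after the fact, which is
why the patch notes the paths are cosmetic and not used in balancing.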