From 918722bdd202acbeda92d650ff0dcecbcd8a0697 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Wed, 9 Mar 2016 09:44:22 -0800
Subject: [PATCH] HDFS-9702. DiskBalancer: getVolumeMap implementation.
 (Contributed by Anu Engineer)

---
 .../hadoop/hdfs/server/datanode/DataNode.java   | 17 +++--
 .../hdfs/server/datanode/DiskBalancer.java      | 26 ++++++++
 .../diskbalancer/DiskBalancerException.java     |  3 +-
 .../diskbalancer/TestDiskBalancerRPC.java       | 66 +++++++++++++++++++
 4 files changed, 107 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 126deb4be29..00e124dbbd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -169,6 +169,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -3360,8 +3361,8 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
-   * Gets a run-time configuration value from running diskbalancer instance. For
-   * example : Disk Balancer bandwidth of a running instance.
+   * Gets a runtime configuration value from the diskbalancer instance. For
+   * example: DiskBalancer bandwidth.
    *
    * @param key - String that represents the run time key value.
    * @return value of the key as a string.
@@ -3370,7 +3371,15 @@ public class DataNode extends ReconfigurableBase
   @Override
   public String getDiskBalancerSetting(String key) throws IOException {
     checkSuperuserPrivilege();
-    throw new DiskBalancerException("Not Implemented",
-        DiskBalancerException.Result.INTERNAL_ERROR);
+    Preconditions.checkNotNull(key);
+    switch (key) {
+    case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
+      return this.diskBalancer.getVolumeNames();
+    default:
+      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
Key: " + + key); + throw new DiskBalancerException("Unknown key", + DiskBalancerException.Result.UNKNOWN_KEY); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java index d5c402e6bf3..9e41d2ef2de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException; import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan; import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step; import org.apache.hadoop.util.Time; +import org.codehaus.jackson.map.ObjectMapper; import java.io.IOException; import java.nio.charset.Charset; @@ -221,6 +222,31 @@ public class DiskBalancer { } } + /** + * Returns a volume ID to Volume base path map. + * + * @return Json string of the volume map. + * @throws DiskBalancerException + */ + public String getVolumeNames() throws DiskBalancerException { + lock.lock(); + try { + checkDiskBalancerEnabled(); + Map pathMap = new HashMap<>(); + Map volMap = getStorageIDToVolumeMap(); + for (Map.Entry entry : volMap.entrySet()) { + pathMap.put(entry.getKey(), entry.getValue().getBasePath()); + } + ObjectMapper mapper = new ObjectMapper(); + return mapper.writeValueAsString(pathMap); + } catch (IOException e) { + throw new DiskBalancerException("Internal error, Unable to " + + "create JSON string.", e, + DiskBalancerException.Result.INTERNAL_ERROR); + } finally { + lock.unlock(); + } + } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java index 00fe53d9c6e..38455a7141a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java @@ -36,7 +36,8 @@ public class DiskBalancerException extends IOException { INVALID_VOLUME, INVALID_MOVE, INTERNAL_ERROR, - NO_SUCH_PLAN + NO_SUCH_PLAN, + UNKNOWN_KEY } private final Result result; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java index e29b3b792bd..37a621617b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java @@ -24,18 +24,24 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus; +import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException.*; import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector; import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory; import 
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
+import org.hamcrest.*;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.util.HashMap;
+import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.NO_PLAN;
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_DONE;
@@ -84,6 +90,8 @@ public class TestDiskBalancerRPC {
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN_HASH));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson());
   }
 
@@ -96,6 +104,8 @@
     planVersion++;
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN_VERSION));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson());
   }
 
@@ -107,6 +117,8 @@
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, "");
   }
 
@@ -131,6 +143,8 @@
     planHash = String.valueOf(hashArray);
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.NO_SUCH_PLAN));
     dataNode.cancelDiskBalancePlan(planHash);
   }
 
@@ -141,9 +155,38 @@
     String planHash = "";
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.NO_SUCH_PLAN));
     dataNode.cancelDiskBalancePlan(planHash);
   }
 
+  @Test
+  public void testGetDiskBalancerVolumeMapping() throws Exception {
+    final int dnIndex = 0;
+    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
+    String volumeNameJson = dataNode.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+    Assert.assertNotNull(volumeNameJson);
+    ObjectMapper mapper = new ObjectMapper();
+
+    @SuppressWarnings("unchecked")
+    Map<String, String> volumemap =
+        mapper.readValue(volumeNameJson, HashMap.class);
+
+    Assert.assertEquals(2, volumemap.size());
+  }
+
+  @Test
+  public void testGetDiskBalancerInvalidSetting() throws Exception {
+    final int dnIndex = 0;
+    final String invalidSetting = "invalidSetting";
+    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
+    thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.UNKNOWN_KEY));
+    dataNode.getDiskBalancerSetting(invalidSetting);
+  }
+
   @Test
   public void testQueryPlan() throws Exception {
@@ -173,6 +216,8 @@
     final int dnIndex = 0;
     DataNode dataNode = cluster.getDataNodes().get(dnIndex);
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.UNKNOWN_KEY));
     dataNode.getDiskBalancerSetting(
         DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
   }
@@ -223,4 +268,25 @@ public class TestDiskBalancerRPC {
       return this;
     }
   }
+
+  private class ResultVerifier
+      extends TypeSafeMatcher<DiskBalancerException> {
+    private final DiskBalancerException.Result expectedResult;
+
+    ResultVerifier(DiskBalancerException.Result expectedResult) {
+      this.expectedResult = expectedResult;
+    }
+
+    @Override
+    protected boolean matchesSafely(DiskBalancerException exception) {
+      return (this.expectedResult == exception.getResult());
+    }
+
+    @Override
+    public void describeTo(Description description) {
+      description.appendText("expects Result: ")
+          .appendValue(this.expectedResult);
+
+    }
+  }
 }
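
Note (illustrative, not part of the patch): getVolumeNames() serializes the
DataNode's storageID -> volume base path map as JSON, and callers reach it
through DataNode#getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME).
The sketch below shows one way a caller holding a DataNode reference (for
example, from a MiniDFSCluster as in the test above) might decode that JSON.
The VolumeMapReader class name is hypothetical; it uses only the Jackson 1.x
API already imported by the patch.

  import java.io.IOException;
  import java.util.Map;

  import org.apache.hadoop.hdfs.server.datanode.DataNode;
  import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
  import org.codehaus.jackson.map.ObjectMapper;
  import org.codehaus.jackson.type.TypeReference;

  public class VolumeMapReader {
    // Hypothetical helper: fetches the DiskBalancer volume map and decodes
    // the JSON object (e.g. {"DS-b8f9...":"/data/disk1"}) into a Map.
    public static Map<String, String> readVolumeMap(DataNode dataNode)
        throws IOException {
      String json = dataNode.getDiskBalancerSetting(
          DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
      ObjectMapper mapper = new ObjectMapper();
      // A TypeReference yields a typed Map instead of a raw HashMap,
      // avoiding the @SuppressWarnings("unchecked") used in the test.
      return mapper.readValue(json,
          new TypeReference<Map<String, String>>() { });
    }
  }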