From dfb8369c2918c3377a4e0bd77c67bd0513518d17 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas <suresh@apache.org>
Date: Fri, 5 Oct 2012 07:02:52 +0000
Subject: [PATCH] HDFS-3735. NameNode WebUI should allow sorting live datanode
 list by fields Block Pool Used, Block Pool Used(%) and Failed Volumes.
 Contributed by Brahma Reddy Battula.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1394385 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  4 ++
 .../hadoop/hdfs/server/common/JspHelper.java  | 21 ++++++++++
 .../hdfs/server/common/TestJspHelper.java     | 42 +++++++++++++++++++
 3 files changed, 67 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ac6c597d1e..0cfd284d4d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -143,6 +143,10 @@ Trunk (Unreleased)
     HDFS-3995. Use DFSTestUtil.createFile() for file creation and
     writing in test cases. (Jing Zhao via suresh)
 
+    HDFS-3735. NameNode WebUI should allow sorting live datanode list by fields
+    Block Pool Used, Block Pool Used(%) and Failed Volumes.
+    (Brahma Reddy Battula via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 831f3430bee..a2e0f501bde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -276,6 +276,9 @@ public class JspHelper {
       FIELD_PERCENT_REMAINING = 9,
       FIELD_ADMIN_STATE       = 10,
       FIELD_DECOMMISSIONED    = 11,
+      FIELD_BLOCKPOOL_USED    = 12,
+      FIELD_PERBLOCKPOOL_USED = 13,
+      FIELD_FAILED_VOLUMES    = 14,
       SORT_ORDER_ASC          = 1,
       SORT_ORDER_DSC          = 2;
 
@@ -303,6 +306,12 @@ public class JspHelper {
         sortField = FIELD_ADMIN_STATE;
       } else if (field.equals("decommissioned")) {
        sortField = FIELD_DECOMMISSIONED;
+      } else if (field.equals("bpused")) {
+        sortField = FIELD_BLOCKPOOL_USED;
+      } else if (field.equals("pcbpused")) {
+        sortField = FIELD_PERBLOCKPOOL_USED;
+      } else if (field.equals("volfails")) {
+        sortField = FIELD_FAILED_VOLUMES;
       } else {
         sortField = FIELD_NAME;
       }
@@ -361,6 +370,18 @@ public class JspHelper {
       case FIELD_NAME:
         ret = d1.getHostName().compareTo(d2.getHostName());
         break;
+      case FIELD_BLOCKPOOL_USED:
+        dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed();
+        ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
+        break;
+      case FIELD_PERBLOCKPOOL_USED:
+        ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent();
+        ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
+        break;
+      case FIELD_FAILED_VOLUMES:
+        int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
+        ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
+        break;
       }
       return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
index d024bcdbf7c..bad1fff3ef2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
@@ -22,6 +22,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
@@ -29,7 +30,9 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -399,4 +402,43 @@ public class TestJspHelper {
           ugi.getAuthenticationMethod());
     }
   }
+
+  @Test
+  public void testSortNodeByFields() throws Exception {
+    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "storage1",
+        1234, 2345, 3456);
+    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
+        1235, 2346, 3457);
+    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
+        100, 924, 100, 10, 2);
+    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
+        200, 1848, 200, 20, 1);
+    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    live.add(dnDesc1);
+    live.add(dnDesc2);
+
+    // Test sorting by failed volumes
+    JspHelper.sortNodeList(live, "volfails", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "volfails", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+
+    // Test sorting by Blockpool used
+    JspHelper.sortNodeList(live, "bpused", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    JspHelper.sortNodeList(live, "bpused", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+
+    // Test sorting by Percentage Blockpool used
+    JspHelper.sortNodeList(live, "pcbpused", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "pcbpused", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+  }
 }