HDFS-10813. DiskBalancer: Add the getNodeList method in Command. Contributed by Yiqun Lin.

Anu Engineer 2016-08-30 18:42:55 -07:00
parent d6d9cff21b
commit 20ae1fa259
2 changed files with 65 additions and 1 deletion

org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java

@@ -18,7 +18,10 @@
package org.apache.hadoop.hdfs.server.diskbalancer.command;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.lang.StringUtils;
@@ -221,7 +224,7 @@ public abstract class Command extends Configured {
   * @return Set of node names
   * @throws IOException
   */
-  private Set<String> getNodeList(String listArg) throws IOException {
+  protected Set<String> getNodeList(String listArg) throws IOException {
    URL listURL;
    String nodeData;
    Set<String> resultSet = new TreeSet<>();
@@ -242,6 +245,37 @@ public abstract class Command extends Configured {
    return resultSet;
  }

  /**
   * Returns a list of DiskBalancer nodes from the cluster; the list is
   * empty if no matching nodes are found.
   *
   * @param listArg String File URL or a comma separated list of node names.
   * @return List of DiskBalancer Node
   * @throws IOException
   */
  protected List<DiskBalancerDataNode> getNodes(String listArg)
      throws IOException {
    Set<String> nodeNames = null;
    List<DiskBalancerDataNode> nodeList = Lists.newArrayList();

    if ((listArg == null) || listArg.isEmpty()) {
      return nodeList;
    }
    nodeNames = getNodeList(listArg);

    DiskBalancerDataNode node = null;
    if (!nodeNames.isEmpty()) {
      for (String name : nodeNames) {
        node = getNode(name);
        if (node != null) {
          nodeList.add(node);
        }
      }
    }

    return nodeList;
  }

  /**
   * Verifies if the command line options are sane.
   *
@@ -471,4 +505,12 @@ public abstract class Command extends Configured {
  public int getTopNodes() {
    return topNodes;
  }

  /**
   * Set DiskBalancer cluster.
   */
  @VisibleForTesting
  public void setCluster(DiskBalancerCluster newCluster) {
    this.cluster = newCluster;
  }
}
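
With getNodeList widened from private to protected, and getNodes and setCluster added alongside it, same-package code can resolve a user-supplied node argument directly. Below is a minimal usage sketch, not part of the commit: the NodeListDemo class name and the sample arguments are illustrative assumptions, and it reuses the existing ReportCommand subclass the way the new unit test does.

package org.apache.hadoop.hdfs.server.diskbalancer.command;

import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/**
 * Illustrative sketch only. It must live in the same package as Command
 * because getNodeList(String) is protected.
 */
public class NodeListDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // ReportCommand is an existing concrete Command; the second argument
    // is its output stream, passed as null here just as the new test does.
    ReportCommand command = new ReportCommand(conf, null);

    // Per the javadoc, the argument is either a comma-separated list of
    // node names/UUIDs...
    Set<String> names = command.getNodeList("datanode1,datanode2");
    System.out.println("parsed " + names.size() + " node names");

    // ...or a file URL pointing at a node list, e.g. (placeholder path):
    //   command.getNodeList("file:///tmp/nodes.txt");
    //
    // getNodes(String) goes one step further and maps each name to a
    // DiskBalancerDataNode, which requires a populated cluster first,
    // e.g. via setCluster() as the new unit test demonstrates.
  }
}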

org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -435,4 +436,25 @@ public class TestDiskBalancerCommand {
      miniDFSCluster.shutdown();
    }
  }

  @Test(timeout = 60000)
  public void testGetNodeList() throws Exception {
    ClusterConnector jsonConnector =
        ConnectorFactory.getCluster(clusterJson, conf);
    DiskBalancerCluster diskBalancerCluster =
        new DiskBalancerCluster(jsonConnector);
    diskBalancerCluster.readClusterInfo();

    int nodeNum = 5;
    StringBuilder listArg = new StringBuilder();
    for (int i = 0; i < nodeNum; i++) {
      listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
          .append(",");
    }

    ReportCommand command = new ReportCommand(conf, null);
    command.setCluster(diskBalancerCluster);
    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
    assertEquals(nodeNum, nodeList.size());
  }
}
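
As a side note, the early return in getNodes means a null or empty argument yields an empty list rather than null. A companion test sketch, not part of this commit (it assumes the test class's existing conf field and the same JUnit imports):

  @Test(timeout = 60000)
  public void testGetNodesWithEmptyArgument() throws Exception {
    ReportCommand command = new ReportCommand(conf, null);
    // No cluster is needed: getNodes() returns before any node lookup.
    assertEquals(0, command.getNodes("").size());
    assertEquals(0, command.getNodes(null).size());
  }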