HDFS-16581. Print node status when executing printTopology. (#4321)
Reviewed-by: Viraj Jasani <vjasani@apache.org>
Signed-off-by: Tao Li <tomscut@apache.org>
commit 18a5e843bc
parent ee3ee98ee5
@@ -35,7 +35,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -1595,40 +1594,45 @@ public class DFSAdmin extends FsShell {
    * @throws IOException If an error while getting datanode report
    */
   public int printTopology() throws IOException {
     DistributedFileSystem dfs = getDFS();
     final DatanodeInfo[] report = dfs.getDataNodeStats();
 
     // Build a map of rack -> nodes from the datanode report
-    HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
+    Map<String, HashMap<String, String>> map = new HashMap<>();
     for(DatanodeInfo dni : report) {
       String location = dni.getNetworkLocation();
       String name = dni.getName();
+      String dnState = dni.getAdminState().toString();
 
-      if(!tree.containsKey(location)) {
-        tree.put(location, new TreeSet<String>());
+      if(!map.containsKey(location)) {
+        map.put(location, new HashMap<>());
       }
 
-      tree.get(location).add(name);
+      Map<String, String> node = map.get(location);
+      node.put(name, dnState);
     }
 
     // Sort the racks (and nodes) alphabetically, display in order
-    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
+    List<String> racks = new ArrayList<>(map.keySet());
     Collections.sort(racks);
 
     for(String r : racks) {
       System.out.println("Rack: " + r);
-      TreeSet<String> nodes = tree.get(r);
+      Map<String, String> nodes = map.get(r);
 
-      for(String n : nodes) {
+      for(Map.Entry<String, String> entry : nodes.entrySet()) {
+        String n = entry.getKey();
         System.out.print("   " + n);
         String hostname = NetUtils.getHostNameOfIP(n);
-        if(hostname != null)
-          System.out.print(" (" + hostname + ")");
+        if(hostname != null) {
+          System.out.print(" (" + hostname + ")");
+        }
+        System.out.print(" " + entry.getValue());
         System.out.println();
       }
 
       System.out.println();
     }
     return 0;
   }
 
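With this change, each node line printed by `hdfs dfsadmin -printTopology` gains a trailing admin state taken from DatanodeInfo.getAdminState(). A sketch of the resulting output for a small cluster, using hypothetical addresses and hostnames (not taken from this commit):

    Rack: /d1/r1
       192.168.1.10:9866 (host-1.example.com) NORMAL

    Rack: /d1/r2
       192.168.1.11:9866 (host-2.example.com) IN_MAINTENANCE
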
@@ -59,6 +59,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -512,6 +514,52 @@ public class TestDFSAdmin {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testPrintTopologyWithStatus() throws Exception {
+    redirectStream();
+    final Configuration dfsConf = new HdfsConfiguration();
+    final File baseDir = new File(
+        PathUtils.getTestDir(getClass()),
+        GenericTestUtils.getMethodName());
+    dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+
+    final int numDn = 4;
+    final String[] racks = {
+        "/d1/r1", "/d1/r2",
+        "/d2/r1", "/d2/r2"};
+
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+        .numDataNodes(numDn).racks(racks).build()) {
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+
+      DatanodeManager dm = miniCluster.getNameNode().getNamesystem().
+          getBlockManager().getDatanodeManager();
+      DatanodeDescriptor maintenanceNode = dm.getDatanode(
+          miniCluster.getDataNodes().get(1).getDatanodeId());
+      maintenanceNode.setInMaintenance();
+      DatanodeDescriptor demissionNode = dm.getDatanode(
+          miniCluster.getDataNodes().get(2).getDatanodeId());
+      demissionNode.setDecommissioned();
+
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      resetStream();
+      final int ret = ToolRunner.run(dfsAdmin, new String[] {"-printTopology"});
+
+      /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(0, ret);
+      assertTrue(outs.get(1).contains(DatanodeInfo.AdminStates.NORMAL.toString()));
+      assertTrue(outs.get(4).contains(DatanodeInfo.AdminStates.IN_MAINTENANCE.toString()));
+      assertTrue(outs.get(7).contains(DatanodeInfo.AdminStates.DECOMMISSIONED.toString()));
+      assertTrue(outs.get(10).contains(DatanodeInfo.AdminStates.NORMAL.toString()));
+    }
+  }
+
   @Test(timeout = 30000)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
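A note on the assertion indices in the test above: with one DataNode per rack, each rack contributes three lines to the collected output (the "Rack:" header, one node line, and a trailing blank line), so the four node lines land at indices 1, 4, 7, and 10. MiniDFSCluster assigns DataNode i to racks[i], and the racks are printed in sorted order, so DataNodes 1 and 2 (racks /d1/r2 and /d2/r1) are the ones expected to report IN_MAINTENANCE and DECOMMISSIONED at indices 4 and 7.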