HDFS-16048. RBF: Print network topology on the router web (#3062)

Reviewed-by: Inigo Goiri <inigoiri@apache.org>
Reviewed-by: Hemanth Boyina <hemanthboyina@apache.org>
Reviewed-by: Akira Ajisaka <aajisaka@apache.org>
(cherry picked from commit c748fce17a)
litao 2021-06-08 15:14:06 +08:00 committed by Takanobu Asanuma
parent 46d4b51bff
commit 02249171b1
7 changed files with 290 additions and 5 deletions
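
In short, this change registers the NameNode's network topology servlet on the Router HTTP server as well, so the same /topology endpoint (plus a "Network Topology" link in the router web UI) becomes available through RBF, as the diffs below show. A minimal sketch of fetching the default plain-text view; the router host and port are placeholders, not values taken from this commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RouterTopologyTextClient {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the value of dfs.federation.router.http-address.
    URL url = new URL("http://router.example.com:50071/topology");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(20000);
    conn.setReadTimeout(20000);
    // Without an Accept header containing "json", the servlet answers in plain text.
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}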

View File

@@ -125,6 +125,9 @@ public class RouterHttpServer extends AbstractService {
RouterFsckServlet.PATH_SPEC,
RouterFsckServlet.class,
true);
httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME,
RouterNetworkTopologyServlet.PATH_SPEC,
RouterNetworkTopologyServlet.class);
}
public InetSocketAddress getHttpAddress() {

View File

@@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.StringUtils;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.List;
/**
* A servlet to print out the network topology from the router.
*/
public class RouterNetworkTopologyServlet extends NetworkTopologyServlet {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws IOException {
final ServletContext context = getServletContext();
String format = parseAcceptHeader(request);
if (FORMAT_TEXT.equals(format)) {
response.setContentType("text/plain; charset=UTF-8");
} else if (FORMAT_JSON.equals(format)) {
response.setContentType("application/json; charset=UTF-8");
}
Router router = RouterHttpServer.getRouterFromContext(context);
DatanodeInfo[] datanodeReport =
router.getRpcServer().getDatanodeReport(
HdfsConstants.DatanodeReportType.ALL);
List<Node> datanodeInfos = Arrays.asList(datanodeReport);
try (PrintStream out = new PrintStream(
response.getOutputStream(), false, "UTF-8")) {
printTopology(out, datanodeInfos, format);
} catch (Throwable t) {
String errMsg = "Print network topology failed. "
+ StringUtils.stringifyException(t);
response.sendError(HttpServletResponse.SC_GONE, errMsg);
throw new IOException(errMsg);
} finally {
response.getOutputStream().close();
}
}
}
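
The router servlet above inherits its output handling from the NameNode's NetworkTopologyServlet (modified at the end of this commit): plain text by default, JSON when the Accept header contains "json". A hedged sketch of consuming the JSON form with Jackson; the URL is a placeholder, and the rack paths in the comment follow the test below, where racks reported through the router carry a nameservice prefix such as /ns0/rack1:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Iterator;
import java.util.Map;

public class RouterTopologyJsonClient {
  public static void main(String[] args) throws Exception {
    // Placeholder router address.
    URL url = new URL("http://router.example.com:50071/topology");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    conn.setConnectTimeout(20000);
    conn.setReadTimeout(20000);

    // The response is an array with one element per rack; each element maps the
    // rack path (e.g. "/ns0/rack1") to the datanodes located in that rack.
    JsonNode racks = new ObjectMapper().readTree(conn.getInputStream());
    for (JsonNode rack : racks) {
      Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
      while (fields.hasNext()) {
        Map.Entry<String, JsonNode> field = fields.next();
        System.out.println(field.getKey() + " -> "
            + field.getValue().size() + " datanode(s)");
      }
    }
  }
}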

View File

@@ -48,6 +48,7 @@
<li><a href="jmx">Metrics</a></li>
<li><a href="conf">Configuration</a></li>
<li><a href="stacks">Process Thread Dump</a></li>
<li><a href="topology">Network Topology</a></li>
</ul>
</li>
</ul>

View File

@@ -52,6 +52,7 @@
<li><a href="jmx">Metrics</a></li>
<li><a href="conf">Configuration</a></li>
<li><a href="stacks">Process Thread Dump</a></li>
<li><a href="topology">Network Topology</a></li>
</ul>
</li>
</ul>

View File

@@ -0,0 +1,210 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
import org.apache.hadoop.io.IOUtils;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Iterator;
import java.util.Map;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestRouterNetworkTopologyServlet {
private static StateStoreDFSCluster clusterWithDatanodes;
private static StateStoreDFSCluster clusterNoDatanodes;
@BeforeClass
public static void setUp() throws Exception {
// Builder configuration
Configuration routerConf =
new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true");
Configuration hdfsConf = new Configuration(false);
// Build and start a federated cluster
clusterWithDatanodes = new StateStoreDFSCluster(false, 2,
MultipleDestinationMountTableResolver.class);
clusterWithDatanodes.addNamenodeOverrides(hdfsConf);
clusterWithDatanodes.addRouterOverrides(routerConf);
clusterWithDatanodes.setNumDatanodesPerNameservice(9);
clusterWithDatanodes.setIndependentDNs();
clusterWithDatanodes.setRacks(
new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
"/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4",
"/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6",
"/rack6"});
clusterWithDatanodes.startCluster();
clusterWithDatanodes.startRouters();
clusterWithDatanodes.waitClusterUp();
clusterWithDatanodes.waitActiveNamespaces();
// Build and start a federated cluster
clusterNoDatanodes = new StateStoreDFSCluster(false, 2,
MultipleDestinationMountTableResolver.class);
clusterNoDatanodes.addNamenodeOverrides(hdfsConf);
clusterNoDatanodes.addRouterOverrides(routerConf);
clusterNoDatanodes.setNumDatanodesPerNameservice(0);
clusterNoDatanodes.setIndependentDNs();
clusterNoDatanodes.startCluster();
clusterNoDatanodes.startRouters();
clusterNoDatanodes.waitClusterUp();
clusterNoDatanodes.waitActiveNamespaces();
}
@Test
public void testPrintTopologyTextFormat() throws Exception {
// get http Address
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// assert rack info
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));
// assert node number
assertEquals(18,
topology.split("127.0.0.1").length - 1);
}
@Test
public void testPrintTopologyJsonFormat() throws Exception {
// get http Address
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
String topology = out.toString();
// parse json
JsonNode racks = new ObjectMapper().readTree(topology);
// assert rack number
assertEquals(6, racks.size());
// assert rack info
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));
// assert node number
Iterator<JsonNode> elements = racks.elements();
int dataNodesCount = 0;
while (elements.hasNext()) {
JsonNode rack = elements.next();
Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
while (fields.hasNext()) {
dataNodesCount += fields.next().getValue().size();
}
}
assertEquals(18, dataNodesCount);
}
@Test
public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
// get http Address
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// assert node number
assertTrue(topology.contains("No DataNodes"));
}
@Test
public void testPrintTopologyNoDatanodesJsonFormat() throws Exception {
// get http Address
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// assert node number
assertTrue(topology.contains("No DataNodes"));
}
}

View File

@@ -253,7 +253,7 @@ public class NameNodeHttpServer {
httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME,
IsNameNodeActiveServlet.PATH_SPEC,
IsNameNodeActiveServlet.class);
httpServer.addInternalServlet("topology",
httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME,
NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class);
}

View File

@@ -46,6 +46,7 @@ import java.util.TreeSet;
@InterfaceAudience.Private
public class NetworkTopologyServlet extends DfsServlet {
public static final String SERVLET_NAME = "topology";
public static final String PATH_SPEC = "/topology";
protected static final String FORMAT_JSON = "json";
@@ -90,7 +91,7 @@ public class NetworkTopologyServlet extends DfsServlet {
* @param leaves leaves nodes under base scope
* @param format the response format
*/
public void printTopology(PrintStream stream, List<Node> leaves,
protected void printTopology(PrintStream stream, List<Node> leaves,
String format) throws BadFormatException, IOException {
if (leaves.isEmpty()) {
stream.print("No DataNodes");
@@ -120,7 +121,7 @@ public class NetworkTopologyServlet extends DfsServlet {
}
}
private void printJsonFormat(PrintStream stream, Map<String,
protected void printJsonFormat(PrintStream stream, Map<String,
TreeSet<String>> tree, ArrayList<String> racks) throws IOException {
JsonFactory dumpFactory = new JsonFactory();
JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream);
@@ -152,7 +153,7 @@ public class NetworkTopologyServlet extends DfsServlet {
}
}
private void printTextFormat(PrintStream stream, Map<String,
protected void printTextFormat(PrintStream stream, Map<String,
TreeSet<String>> tree, ArrayList<String> racks) {
for(String r : racks) {
stream.println("Rack: " + r);
@@ -171,7 +172,7 @@ public class NetworkTopologyServlet extends DfsServlet {
}
@VisibleForTesting
static String parseAcceptHeader(HttpServletRequest request) {
protected static String parseAcceptHeader(HttpServletRequest request) {
String format = request.getHeader(HttpHeaders.ACCEPT);
return format != null && format.contains(FORMAT_JSON) ?
FORMAT_JSON : FORMAT_TEXT;
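
The final hunks widen visibility so that the router servlet can reuse the NameNode implementation: printTopology goes from public to protected, printJsonFormat and printTextFormat from private to protected, and parseAcceptHeader becomes protected static. A hedged sketch of exercising the Accept-header negotiation directly, assuming Mockito on the test classpath and a test class placed in the same package as NetworkTopologyServlet (this test is not part of the commit):

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import javax.servlet.http.HttpServletRequest;
import org.junit.Test;

public class TestAcceptHeaderNegotiation {

  @Test
  public void jsonIsChosenWhenAcceptMentionsJson() {
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getHeader("Accept")).thenReturn("application/json");
    assertEquals(NetworkTopologyServlet.FORMAT_JSON,
        NetworkTopologyServlet.parseAcceptHeader(request));
  }

  @Test
  public void plainTextIsTheDefaultFormat() {
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getHeader("Accept")).thenReturn("text/html");
    assertEquals(NetworkTopologyServlet.FORMAT_TEXT,
        NetworkTopologyServlet.parseAcceptHeader(request));
  }
}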