HDFS-12273. Federation UI. Contributed by Inigo Goiri.

(cherry picked from commit adbb2e00c7)
Inigo Goiri 2017-10-05 17:26:43 -07:00
parent c85b4ba028
commit 87b1f87094
17 changed files with 1102 additions and 11 deletions

File: pom.xml (hadoop-hdfs)

@@ -259,6 +259,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<copy file="${basedir}/src/main/webapps/proto-web.xml"
tofile="${project.build.directory}/webapps/nfs3/WEB-INF/web.xml"
filtering="true"/>
<copy file="${basedir}/src/main/webapps/proto-web.xml"
tofile="${project.build.directory}/webapps/router/WEB-INF/web.xml"
filtering="true"/>
<copy toDir="${project.build.directory}/webapps">
<fileset dir="${basedir}/src/main/webapps">
<exclude name="**/proto-web.xml"/>

File: DFSConfigKeys.java

@@ -1249,6 +1249,25 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
FEDERATION_ROUTER_PREFIX + "admin.enable";
public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
// HDFS Router-based federation web
public static final String DFS_ROUTER_HTTP_ENABLE =
FEDERATION_ROUTER_PREFIX + "http.enable";
public static final boolean DFS_ROUTER_HTTP_ENABLE_DEFAULT = true;
public static final String DFS_ROUTER_HTTP_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "http-address";
public static final int DFS_ROUTER_HTTP_PORT_DEFAULT = 50071;
public static final String DFS_ROUTER_HTTP_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "http-bind-host";
public static final String DFS_ROUTER_HTTP_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_HTTP_PORT_DEFAULT;
public static final String DFS_ROUTER_HTTPS_ADDRESS_KEY =
FEDERATION_ROUTER_PREFIX + "https-address";
public static final int DFS_ROUTER_HTTPS_PORT_DEFAULT = 50072;
public static final String DFS_ROUTER_HTTPS_BIND_HOST_KEY =
FEDERATION_ROUTER_PREFIX + "https-bind-host";
public static final String DFS_ROUTER_HTTPS_ADDRESS_DEFAULT =
"0.0.0.0:" + DFS_ROUTER_HTTPS_PORT_DEFAULT;
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
@Deprecated
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
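The new keys mirror the existing RPC and admin ones under the dfs.federation.router prefix. As a hedged sketch (not part of this commit), a test or tool could point the web UI at an ephemeral port the same way the tests later in this commit do; RouterHttpConfSketch and createConf are illustrative names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RouterHttpConfSketch {
  public static Configuration createConf() {
    Configuration conf = new Configuration();
    // Enable the web UI and let the OS pick a free port (port 0).
    conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE, true);
    conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY, "0.0.0.0");
    return conf;
  }
}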

File: FederationMBean.java

@@ -201,4 +201,11 @@ public interface FederationMBean {
* @return Host and port of the router.
*/
String getBlockPoolId();
/**
* Get the current state of the router.
*
* @return String label for the current router state.
*/
String getRouterStatus();
}
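FederationMetrics (next file) implements this bean, and it is exposed over JMX as Hadoop:service=Router,name=FederationState, the bean name federationhealth.js queries later in this commit. A hedged sketch of reading the new attribute in-process; RouterStatusSketch is an illustrative name:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class RouterStatusSketch {
  public static String readStatus() throws Exception {
    // Bean name taken from the /jmx queries in federationhealth.js below.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=Router,name=FederationState");
    return (String) mbs.getAttribute(name, "RouterStatus");
  }
}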

File: FederationMetrics.java

@@ -21,6 +21,9 @@ import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
@@ -34,6 +37,7 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
@@ -83,6 +87,9 @@ public class FederationMetrics implements FederationMBean {
/** Format for a date. */
private static final String DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
/** Prevent holding the page from loading for too long. */
private static final long TIME_OUT = TimeUnit.SECONDS.toMillis(1);
/** Router interface. */
private final Router router;
@@ -353,8 +360,8 @@ public class FederationMetrics implements FederationMBean {
final Map<String, Map<String, Object>> info = new HashMap<>();
try {
RouterRpcServer rpcServer = this.router.getRpcServer();
DatanodeInfo[] live = rpcServer.getDatanodeReport(
DatanodeReportType.LIVE, TIME_OUT);
if (live.length > 0) {
float totalDfsUsed = 0;
@@ -446,7 +453,14 @@ public class FederationMetrics implements FederationMBean {
@Override
public String getHostAndPort() {
InetSocketAddress address = this.router.getHttpServerAddress();
if (address != null) {
try {
String hostname = InetAddress.getLocalHost().getHostName();
int port = address.getPort();
return hostname + ":" + port;
} catch (UnknownHostException ignored) { }
}
return "Unknown";
}
@@ -479,6 +493,11 @@ public class FederationMetrics implements FederationMBean {
}
}
@Override
public String getRouterStatus() {
return "RUNNING";
}
/**
* Build a set of unique values found in all namespaces.
*

File: MembershipNamenodeResolver.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoR
import org.apache.hadoop.hdfs.server.federation.store.protocol.NamenodeHeartbeatRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeRegistrationRequest;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -222,6 +223,28 @@ public class MembershipNamenodeResolver
report.getServiceAddress(), report.getLifelineAddress(),
report.getWebAddress(), report.getState(), report.getSafemode());
if (report.statsValid()) {
MembershipStats stats = MembershipStats.newInstance();
stats.setNumOfFiles(report.getNumFiles());
stats.setNumOfBlocks(report.getNumBlocks());
stats.setNumOfBlocksMissing(report.getNumBlocksMissing());
stats.setNumOfBlocksPendingReplication(
report.getNumOfBlocksPendingReplication());
stats.setNumOfBlocksUnderReplicated(
report.getNumOfBlocksUnderReplicated());
stats.setNumOfBlocksPendingDeletion(
report.getNumOfBlocksPendingDeletion());
stats.setAvailableSpace(report.getAvailableSpace());
stats.setTotalSpace(report.getTotalSpace());
stats.setNumOfDecommissioningDatanodes(
report.getNumDecommissioningDatanodes());
stats.setNumOfActiveDatanodes(report.getNumLiveDatanodes());
stats.setNumOfDeadDatanodes(report.getNumDeadDatanodes());
stats.setNumOfDecomActiveDatanodes(report.getNumDecomLiveDatanodes());
stats.setNumOfDecomDeadDatanodes(report.getNumDecomDeadDatanodes());
record.setStats(stats);
}
if (report.getState() != UNAVAILABLE) {
// Set/update our last contact time
record.setLastContact(Time.now());

File: Router.java

@@ -89,6 +89,9 @@ public class Router extends CompositeService {
private RouterAdminServer adminServer;
private InetSocketAddress adminAddress;
/** HTTP interface and web application. */
private RouterHttpServer httpServer;
/** Interface with the State Store. */
private StateStoreService stateStore;
@@ -168,6 +171,14 @@ public class Router extends CompositeService {
addService(this.adminServer);
}
if (conf.getBoolean(
DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE,
DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE_DEFAULT)) {
// Create HTTP server
this.httpServer = createHttpServer();
addService(this.httpServer);
}
if (conf.getBoolean(
DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT)) {
@@ -353,6 +364,31 @@ public class Router extends CompositeService {
return adminAddress;
}
/////////////////////////////////////////////////////////
// HTTP server
/////////////////////////////////////////////////////////
/**
* Create an HTTP server for this Router.
*
* @return HTTP server for this Router.
*/
protected RouterHttpServer createHttpServer() {
return new RouterHttpServer(this);
}
/**
* Get the current HTTP socket address for the router.
*
* @return InetSocketAddress HTTP address.
*/
public InetSocketAddress getHttpServerAddress() {
if (httpServer != null) {
return httpServer.getHttpAddress();
}
return null;
}
/////////////////////////////////////////////////////////
// Namenode heartbeat monitors
/////////////////////////////////////////////////////////
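Since createHttpServer() is protected, a subclass can substitute its own server, e.g. to instrument it in tests. A hypothetical sketch (InstrumentedRouter is an illustrative name):

import org.apache.hadoop.hdfs.server.federation.router.Router;
import org.apache.hadoop.hdfs.server.federation.router.RouterHttpServer;

public class InstrumentedRouter extends Router {
  @Override
  protected RouterHttpServer createHttpServer() {
    // Could return a RouterHttpServer subclass that records requests.
    return new RouterHttpServer(this);
  }
}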

File: RouterHttpServer.java (new)

@@ -0,0 +1,124 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.service.AbstractService;
/**
* Web interface for the {@link Router}. It exposes the Web UI and the WebHDFS
* methods from {@link RouterWebHdfsMethods}.
*/
public class RouterHttpServer extends AbstractService {
protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
/** Configuration for the Router HTTP server. */
private Configuration conf;
/** Router using this HTTP server. */
private final Router router;
/** HTTP server. */
private HttpServer2 httpServer;
/** HTTP addresses. */
private InetSocketAddress httpAddress;
private InetSocketAddress httpsAddress;
public RouterHttpServer(Router router) {
super(RouterHttpServer.class.getName());
this.router = router;
}
@Override
protected void serviceInit(Configuration configuration) throws Exception {
this.conf = configuration;
// Get HTTP address
this.httpAddress = conf.getSocketAddr(
DFSConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY,
DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_DEFAULT,
DFSConfigKeys.DFS_ROUTER_HTTP_PORT_DEFAULT);
// Get HTTPS address
this.httpsAddress = conf.getSocketAddr(
DFSConfigKeys.DFS_ROUTER_HTTPS_BIND_HOST_KEY,
DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_DEFAULT,
DFSConfigKeys.DFS_ROUTER_HTTPS_PORT_DEFAULT);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
// Build and start server
String webApp = "router";
HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(
this.conf, this.httpAddress, this.httpsAddress, webApp,
DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
this.httpServer = builder.build();
this.httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, this.router);
this.httpServer.setAttribute(JspHelper.CURRENT_CONF, this.conf);
setupServlets(this.httpServer, this.conf);
this.httpServer.start();
// The server port can be ephemeral... ensure we have the correct info
InetSocketAddress listenAddress = this.httpServer.getConnectorAddress(0);
if (listenAddress != null) {
this.httpAddress = new InetSocketAddress(this.httpAddress.getHostName(),
listenAddress.getPort());
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (this.httpServer != null) {
this.httpServer.stop();
}
super.serviceStop();
}
private static void setupServlets(
HttpServer2 httpServer, Configuration conf) {
// TODO Add servlets for FSCK, etc
}
public InetSocketAddress getHttpAddress() {
return this.httpAddress;
}
public InetSocketAddress getHttpsAddress() {
return this.httpsAddress;
}
}
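RouterHttpServer is an AbstractService child of the Router, so its init/start/stop is driven by the Router's CompositeService lifecycle. A hedged end-to-end sketch, assuming the configuration enables the HTTP server and whatever other services the Router needs (class and variable names are illustrative):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.router.Router;

public class RouterWebSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Router router = new Router();
    router.init(conf);   // creates and inits the RouterHttpServer child service
    router.start();      // serviceStart() builds HttpServer2 and binds the port
    InetSocketAddress web = router.getHttpServerAddress();
    System.out.println("Router web UI at " + web);
    router.stop();
  }
}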

File: RouterRpcClient.java

@@ -35,11 +35,13 @@ import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -713,6 +715,7 @@ public class RouterRpcClient {
* Re-throws exceptions generated by the remote RPC call as either
* RemoteException or IOException.
*
* @param <T> The type of the remote location.
* @param locations List of remote locations to call concurrently.
* @param remoteMethod The remote method and parameters to invoke.
* @param requireResponse If true an exception will be thrown if all calls do
@@ -723,10 +726,35 @@
* @throws IOException If requireResponse=true and any of the calls throw an
* exception.
*/
public <T extends RemoteLocationContext> Map<T, Object> invokeConcurrent(
final Collection<T> locations, final RemoteMethod method,
boolean requireResponse, boolean standby) throws IOException {
return invokeConcurrent(locations, method, requireResponse, standby, -1);
}
/**
* Invokes multiple concurrent proxy calls to different clients. Returns an
* array of results.
*
* Re-throws exceptions generated by the remote RPC call as either
* RemoteException or IOException.
*
* @param locations List of remote locations to call concurrently.
* @param remoteMethod The remote method and parameters to invoke.
* @param requireResponse If true an exception will be thrown if all calls do
* not complete. If false exceptions are ignored and all data results
* successfully received are returned.
* @param standby If the requests should go to the standby namenodes too.
* @param timeoutMs Timeout for each individual call.
* @return Result of invoking the method per subcluster: nsId -> result.
* @throws IOException If requireResponse=true and any of the calls throw an
* exception.
*/
@SuppressWarnings("unchecked")
public <T extends RemoteLocationContext> Map<T, Object> invokeConcurrent(
final Collection<T> locations, final RemoteMethod method,
boolean requireResponse, boolean standby, long timeOutMs)
throws IOException {
final UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
final Method m = method.getMethod();
@@ -782,7 +810,13 @@
}
try {
List<Future<Object>> futures = null;
if (timeOutMs > 0) {
futures = executorService.invokeAll(
callables, timeOutMs, TimeUnit.MILLISECONDS);
} else {
futures = executorService.invokeAll(callables);
}
Map<T, Object> results = new TreeMap<>();
Map<T, IOException> exceptions = new TreeMap<>();
for (int i = 0; i < futures.size(); i++) {
@@ -791,6 +825,13 @@
Future<Object> future = futures.get(i);
Object result = future.get();
results.put(location, result);
} catch (CancellationException ce) {
T loc = orderedLocations.get(i);
String msg =
"Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
LOG.error(msg);
IOException ioe = new IOException(msg);
exceptions.put(location, ioe);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
LOG.debug("Cannot execute {} in {}: {}",
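A hedged sketch of how a caller might use the new timed overload to fan a read-only call out to every subcluster without letting one hung namespace block the rest; the getStats remote method and resolver usage mirror existing code in RouterRpcServer, while TimedFanoutSketch and its helper are illustrative:

import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;

public class TimedFanoutSketch {
  public static Map<FederationNamespaceInfo, Object> stats(
      RouterRpcClient rpcClient, ActiveNamenodeResolver resolver)
      throws IOException {
    RemoteMethod method = new RemoteMethod("getStats");
    Set<FederationNamespaceInfo> nss = resolver.getNamespaces();
    // requireResponse=false: subclusters that miss the 1s deadline are
    // skipped and only the results received in time are returned.
    return rpcClient.invokeConcurrent(
        nss, method, false, false, TimeUnit.SECONDS.toMillis(1));
  }
}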

File: RouterRpcServer.java

@@ -1091,6 +1091,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
checkOperation(OperationCategory.UNCHECKED);
return getDatanodeReport(type, 0);
}
/**
* Get the datanode report with a timeout.
* @param type Type of the datanode.
* @param timeOutMs Time out for the reply in milliseconds.
* @return List of datanodes.
* @throws IOException If it cannot get the report.
*/
public DatanodeInfo[] getDatanodeReport(
DatanodeReportType type, long timeOutMs) throws IOException {
checkOperation(OperationCategory.UNCHECKED);
Map<String, DatanodeInfo> datanodesMap = new LinkedHashMap<>();
RemoteMethod method = new RemoteMethod("getDatanodeReport",
@@ -1098,7 +1111,7 @@
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Object> results =
rpcClient.invokeConcurrent(nss, method, true, false, timeOutMs);
for (Entry<FederationNamespaceInfo, Object> entry : results.entrySet()) {
FederationNamespaceInfo ns = entry.getKey();
DatanodeInfo[] result = (DatanodeInfo[]) entry.getValue();

File: hdfs-default.xml

@@ -4801,6 +4801,62 @@
</description>
</property>
<property>
<name>dfs.federation.router.http-address</name>
<value>0.0.0.0:50071</value>
<description>
HTTP address that handles the web requests to the Router.
The value of this property will take the form of router-host1:http-port.
</description>
</property>
<property>
<name>dfs.federation.router.http-bind-host</name>
<value></value>
<description>
The actual address the HTTP server will bind to. If this optional
address is set, it overrides only the hostname portion of
dfs.federation.router.http-address. This is useful for making the
router listen on all interfaces by setting it to 0.0.0.0.
</description>
</property>
<property>
<name>dfs.federation.router.https-address</name>
<value>0.0.0.0:50072</value>
<description>
HTTPS address that handles the web requests to the Router.
The value of this property will take the form of router-host1:https-port.
</description>
</property>
<property>
<name>dfs.federation.router.https-bind-host</name>
<value></value>
<description>
The actual address the HTTPS server will bind to. If this optional
address is set, it overrides only the hostname portion of
dfs.federation.router.https-address. This is useful for making the
router listen on all interfaces by setting it to 0.0.0.0.
</description>
</property>
<property>
<name>dfs.federation.router.http.enable</name>
<value>true</value>
<description>
Whether the HTTP service that handles web requests to the router is enabled.
</description>
</property>
<property>
<name>dfs.federation.router.metrics.enable</name>
<value>true</value>
<description>
Whether the metrics service in the router is enabled.
</description>
</property>
<property>
<name>dfs.federation.router.file.resolver.client.class</name>
<value>org.apache.hadoop.hdfs.server.federation.MockResolver</value>

File: federationhealth.html (new)

@@ -0,0 +1,371 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=9" />
<link rel="stylesheet" type="text/css" href="/static/bootstrap-3.0.2/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/static/dataTables.bootstrap.css" />
<link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
<title>Router Information</title>
</head>
<body>
<header class="navbar navbar-inverse bs-docs-nav" role="banner">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand">Hadoop</div>
</div>
<ul class="nav navbar-nav" id="ui-tabs">
<li><a href="#tab-overview">Overview</a></li>
<li><a href="#tab-namenode">Subclusters</a></li>
<li><a href="#tab-datanode">Datanodes</a></li>
<li><a href="#tab-mounttable">Mount table</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Utilities <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="jmx">Metrics</a></li>
<li><a href="conf">Configuration</a></li>
<li><a href="logs">Logs</a></li>
<li><a href="fsck">FSCK</a></li>
</ul>
</li>
</ul>
</div>
</header>
<div class="container">
<div id="alert-panel">
<div class="alert alert-danger">
<button type="button" class="close" onclick="$('#alert-panel').hide();">&times;</button>
<div class="alert-body" id="alert-panel-body"></div>
</div>
</div>
<div class="tab-content">
<div class="tab-pane" id="tab-overview"></div>
<div class="tab-pane" id="tab-namenode"></div>
<div class="tab-pane" id="tab-datanode"></div>
<div class="tab-pane" id="tab-mounttable"></div>
</div>
<div class="row">
<hr />
<div class="col-xs-2"><p>Hadoop, {release-year-token}.</p></div>
</div>
</div>
<!-- Overview -->
<script type="text/x-dust-template" id="tmpl-federationhealth">
<div class="page-header"><h1>Router {#federation}<small>'{HostAndPort}'</small>{/federation}</h1></div>
{#federation}
<table class="table table-bordered table-striped">
<tr><th>Started:</th><td>{RouterStarted}</td></tr>
<tr><th>Version:</th><td>{Version}</td></tr>
<tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
<tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
<tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
<tr><th>Status:</th><td>{RouterStatus}</td></tr>
</table>
{/federation}
<div class="page-header"><h1>Summary</h1></div>
{#federation}
<table class="table table-bordered table-striped">
<tr><th>Total capacity</th><td>{TotalCapacity|fmt_bytes}</td></tr>
<tr><th>Used capacity</th><td>{UsedCapacity|fmt_bytes}</td></tr>
<tr><th>Remaining capacity</th><td>{RemainingCapacity|fmt_bytes}</td></tr>
<tr><th>Nameservices</th><td>{NumNameservices}</td></tr>
<tr><th>Namenodes</th><td>{NumNamenodes}</td></tr>
<tr>
<th>DataNodes usages% (Min/Median/Max/stdDev)</th>
<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td>
</tr>
<tr><th><a href="#tab-datanode">Live Nodes</a></th><td>{NumLiveNodes} (Decommissioned: {NumDecomLiveNodes})</td></tr>
<tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadNodes} (Decommissioned: {NumDecomDeadNodes})</td></tr>
<tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningNodes}</td></tr>
<tr><th>Files</th><td>{NumFiles}</td></tr>
<tr><th>Blocks</th><td>{NumBlocks}</td></tr>
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{NumOfBlocksUnderReplicated}</td></tr>
<tr><th>Number of Blocks Pending Deletion</th><td>{NumOfBlocksPendingDeletion}</td></tr>
</table>
{/federation}
</script>
<!-- Subclusters info: list of nameservices and namenodes -->
<script type="text/x-dust-template" id="tmpl-namenode">
<div class="page-header"><h1>Nameservice Information</h1></div>
<div>
<ul class="dfshealth-node-legend">
<li class="dfshealth-node-icon dfshealth-node-alive">Active</li>
<li class="dfshealth-node-icon dfshealth-node-down-decommissioned">Standby</li>
<li class="dfshealth-node-icon dfshealth-node-down-maintenance">Safe mode</li>
<li class="dfshealth-node-icon dfshealth-node-down">Unavailable</li>
</ul>
</div>
<small>
<table class="table">
<thead>
<tr>
<th colspan="6"></th>
<th colspan="3">Blocks</th>
<th colspan="2">Nodes</th>
<th colspan="3">Decom</th>
</tr>
<tr>
<th></th>
<th>Nameservice</th>
<th>Namenode</th>
<th>Last Contact</th>
<th>Capacity</th>
<th>Files</th>
<th>Total</th>
<th>Missing</th>
<th>Under-Replicated</th>
<th>Live</th>
<th>Dead</th>
<th>Progress</th>
<th>Live</th>
<th>Dead</th>
</tr>
</thead>
<tbody>
{#Nameservices}
<tr>
<td class="dfshealth-node-icon dfshealth-node-{iconState}" title="{title}"></td>
<td><a href="http://{webAddress}">{nameserviceId}</a></td>
<td><a href="http://{webAddress}">{namenodeId}</a></td>
<td>{lastHeartbeat}</td>
<td ng-value="{usedPercentage}" style="width:210px">
<div>
<div style="display:inline-block; float: left; padding-right: 10px; width:60px;">{totalSpace|fmt_bytes}</div>
<div class="clearfix progress dfshealth-node-capacity-bar" title="Used: {used|fmt_bytes}">
<div class="progress-bar {#helper_usage_bar value="{usedPercentage}"/}" style="width: {usedPercentage}%">
</div>
</div>
</div>
</td>
<td>{numOfFiles}</td>
<td>{numOfBlocks}</td>
<td>{numOfBlocksMissing}</td>
<td>{numOfBlocksUnderReplicated}</td>
<td>{numOfActiveDatanodes}</td>
<td>{numOfDeadDatanodes}</td>
<td>{numOfDecommissioningDatanodes}</td>
<td>{numOfDecomActiveDatanodes}</td>
<td>{numOfDecomDeadDatanodes}</td>
</tr>
{/Nameservices}
</tbody>
</table>
</small>
<div class="page-header"><h1>Namenode Information</h1></div>
<div>
<ul class="dfshealth-node-legend">
<li class="dfshealth-node-icon dfshealth-node-alive">Active</li>
<li class="dfshealth-node-icon dfshealth-node-down-decommissioned">Standby</li>
<li class="dfshealth-node-icon dfshealth-node-down-maintenance">Safe mode</li>
<li class="dfshealth-node-icon dfshealth-node-down">Unavailable</li>
</ul>
</div>
<small>
<table class="table">
<thead>
<tr>
<th colspan="7"></th>
<th colspan="3">Blocks</th>
<th colspan="2">Nodes</th>
<th colspan="3">Decom</th>
</tr>
<tr>
<th></th>
<th colspan="2">Namenode</th>
<th>Web address</th>
<th>Last Contact</th>
<th>Capacity</th>
<th>Files</th>
<th>Total</th>
<th>Missing</th>
<th>Under-Replicated</th>
<th>Live</th>
<th>Dead</th>
<th>Progress</th>
<th>Live</th>
<th>Dead</th>
</tr>
</thead>
<tbody>
{#Namenodes}
<tr>
<td class="dfshealth-node-icon dfshealth-node-{iconState}" title="{title}"></td>
<td>{nameserviceId}</td>
<td>{namenodeId}</td>
<td><a href="http://{webAddress}">{webAddress}</a></td>
<td>{lastHeartbeat}</td>
<td ng-value="{usedPercentage}" style="width:210px">
<div>
<div style="display:inline-block; float: left; padding-right: 10px; width:60px;">{totalSpace|fmt_bytes}</div>
<div class="clearfix progress dfshealth-node-capacity-bar" title="Used: {used|fmt_bytes}">
<div class="progress-bar {#helper_usage_bar value="{usedPercentage}"/}" style="width: {usedPercentage}%">
</div>
</div>
</div>
</td>
<td>{numOfFiles}</td>
<td>{numOfBlocks}</td>
<td>{numOfBlocksMissing}</td>
<td>{numOfBlocksUnderReplicated}</td>
<td>{numOfActiveDatanodes}</td>
<td>{numOfDeadDatanodes}</td>
<td>{numOfDecommissioningDatanodes}</td>
<td>{numOfDecomActiveDatanodes}</td>
<td>{numOfDecomDeadDatanodes}</td>
</tr>
{/Namenodes}
</tbody>
</table>
</small>
</script>
<!-- Datanodes -->
<script type="text/x-dust-template" id="tmpl-datanode">
<div class="page-header"><h1>Datanode Information</h1></div>
<div>
<ul class="dfshealth-node-legend">
<li class="dfshealth-node-icon dfshealth-node-alive">In service</li>
<li class="dfshealth-node-icon dfshealth-node-down">Down</li>
<li class="dfshealth-node-icon dfshealth-node-decommisioned">Decommissioned</li>
<li class="dfshealth-node-icon dfshealth-node-down-decommisioned">Decommissioned &amp; dead</li>
</ul>
</div>
<div class="page-header"><h1><small>In operation</small></h1></div>
<small>
<table class="table" id="table-datanodes">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th style="width:180px; text-align:center">Capacity</th>
<!--th>Blocks</th-->
<th>Block pool used</th>
<!--th>Version</th-->
</tr>
</thead>
{#LiveNodes}
<tr>
<td ng-value="{state}-{name}" class="dfshealth-node-icon dfshealth-node-{state}">{location}/{name} ({xferaddr})</td>
<td ng-value="{lastContact}">{#helper_relative_time value="{lastContact}"/}</td>
<td ng-value="{usedPercentage}" style="width:210px">
<div>
<div style="display:inline-block; float: left; padding-right: 10px; width:60px;">{capacity|fmt_bytes}</div>
<div class="clearfix progress dfshealth-node-capacity-bar" title="Non DFS: {nonDfsUsedSpace|fmt_bytes}, Used: {used|fmt_bytes}">
<div class="progress-bar {#helper_usage_bar value="{usedPercentage}"/}" style="width: {usedPercentage}%">
</div>
</div>
</div>
</td>
<!--td>{numBlocks}</td-->
<td ng-value="{blockPoolUsedPercent}">{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
<!--td>{version}</td-->
</tr>
{/LiveNodes}
{#DeadNodes}
<tr class="danger">
<td ng-value="{state}-{name}" class="dfshealth-node-icon dfshealth-node-{state}">{location}/{name} ({xferaddr})</td>
<td>{#helper_relative_time value="{lastContact}"/}</td>
<td></td>
<!--td></td-->
<td></td>
<!--td></td-->
</tr>
{/DeadNodes}
</table>
</small>
<div class="page-header"><h1><small>Decommissioning</small></h1></div>
<small>
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Under replicated blocks</th>
<th>Blocks with no live replicas</th>
<th>Under Replicated Blocks <br/>In files under construction</th>
</tr>
</thead>
{#DecomNodes}
<tr>
<td>{location}/{name} ({xferaddr})</td>
<td>{underReplicatedBlocks}</td>
<td>{decommissionOnlyReplicas}</td>
<td>{underReplicateInOpenFiles}</td>
</tr>
{/DecomNodes}
</table>
</small>
</script>
<!-- Mount table -->
<script type="text/x-dust-template" id="tmpl-mounttable">
<div class="page-header"><h1>Mount Table</h1></div>
<small>
<table class="table">
<thead>
<tr>
<th>Global path</th>
<th>Target nameservice</th>
<th>Target path</th>
<th>Order</th>
<th>Read only</th>
<th>Date Modified</th>
<th>Date Created</th>
</tr>
</thead>
<tbody>
{#MountTable}
<tr>
<td>{sourcePath}</td>
<td>{nameserviceId}</td>
<td>{path}</td>
<td>{order}</td>
<td class="dfshealth-node-icon dfshealth-mount-read-only-{readonly}"/>
<td>{dateModified}</td>
<td>{dateCreated}</td>
</tr>
{/MountTable}
</tbody>
</table>
</small>
</script>
<script type="text/javascript" src="/static/jquery-1.10.2.min.js"></script>
<script type="text/javascript" src="/static/jquery.dataTables.min.js"></script>
<script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js"></script>
<script type="text/javascript" src="/static/dataTables.bootstrap.js"></script>
<script type="text/javascript" src="/static/moment.min.js"></script>
<script type="text/javascript" src="/static/dust-full-2.0.0.min.js"></script>
<script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js"></script>
<script type="text/javascript" src="/static/dfs-dust.js"></script>
<script type="text/javascript" src="federationhealth.js"></script>
</body>
</html>

File: federationhealth.js (new)

@@ -0,0 +1,313 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function () {
"use strict";
dust.loadSource(dust.compile($('#tmpl-federationhealth').html(), 'federationhealth'));
dust.loadSource(dust.compile($('#tmpl-namenode').html(), 'namenode-info'));
dust.loadSource(dust.compile($('#tmpl-datanode').html(), 'datanode-info'));
dust.loadSource(dust.compile($('#tmpl-mounttable').html(), 'mounttable'));
$.fn.dataTable.ext.order['ng-value'] = function (settings, col)
{
return this.api().column(col, {order:'index'} ).nodes().map(function (td, i) {
return $(td).attr('ng-value');
});
};
function load_overview() {
var BEANS = [
{"name": "federation", "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"}
];
var HELPERS = {
'helper_fs_max_objects': function (chunk, ctx, bodies, params) {
var o = ctx.current();
if (o.MaxObjects > 0) {
chunk.write('(' + Math.round((o.FilesTotal + o.BlockTotal) / o.MaxObjects * 100) * 100 + ')%');
}
},
'helper_dir_status': function (chunk, ctx, bodies, params) {
var j = ctx.current();
for (var i in j) {
chunk.write('<tr><td>' + i + '</td><td>' + j[i] + '</td><td>' + params.type + '</td></tr>');
}
},
'helper_date_tostring' : function (chunk, ctx, bodies, params) {
var value = dust.helpers.tap(params.value, chunk, ctx);
return chunk.write('' + new Date(Number(value)).toLocaleString());
}
};
var data = {};
// Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
function workaround(nn) {
nn.NodeUsage = JSON.parse(nn.NodeUsage);
return nn;
}
load_json(
BEANS,
guard_with_startup_progress(function(d) {
for (var k in d) {
data[k] = k === 'federation' ? workaround(d[k].beans[0]) : d[k].beans[0];
}
render();
}),
function (url, jqxhr, text, err) {
show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
});
function render() {
var base = dust.makeBase(HELPERS);
dust.render('federationhealth', base.push(data), function(err, out) {
$('#tab-overview').html(out);
$('#ui-tabs a[href="#tab-overview"]').tab('show');
});
}
}
function load_namenode_info() {
var HELPERS = {
'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
var value = dust.helpers.tap(params.value, chunk, ctx);
return chunk.write('' + new Date(Date.now()-1000*Number(value)));
}
};
function workaround(r) {
function node_map_to_array(nodes) {
var res = [];
for (var n in nodes) {
var p = nodes[n];
p.name = n;
res.push(p);
}
return res;
}
function capitalise(string) {
return string.charAt(0).toUpperCase() + string.slice(1).toLowerCase();
}
function augment_namenodes(nodes) {
for (var i = 0, e = nodes.length; i < e; ++i) {
var n = nodes[i];
n.usedPercentage = Math.round(n.used * 1.0 / n.totalSpace * 100);
n.title = "Unavailable";
n.iconState = "down";
if (n.isSafeMode === true) {
n.title = capitalise(n.state) + " (safe mode)";
n.iconState = "decommisioned";
} else if (n.state === "ACTIVE") {
n.title = capitalise(n.state);
n.iconState = "alive";
} else if (nodes[i].state === "STANDBY") {
n.title = capitalise(n.state);
n.iconState = "down-decommisioned";
} else if (nodes[i].state === "UNAVAILABLE") {
n.title = capitalise(n.state);
n.iconState = "down";
}
if (n.namenodeId === "null") {
n.namenodeId = "";
}
}
}
r.Nameservices = node_map_to_array(JSON.parse(r.Nameservices));
augment_namenodes(r.Nameservices);
r.Namenodes = node_map_to_array(JSON.parse(r.Namenodes));
augment_namenodes(r.Namenodes);
return r;
}
$.get(
'/jmx?qry=Hadoop:service=Router,name=FederationState',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
var base = dust.makeBase(HELPERS);
dust.render('namenode-info', base.push(data), function(err, out) {
$('#tab-namenode').html(out);
$('#ui-tabs a[href="#tab-namenode"]').tab('show');
});
})).error(ajax_error_handler);
}
// TODO Copied directly from dfshealth.js; is there a way to import this function?
function load_datanode_info() {
var HELPERS = {
'helper_relative_time' : function (chunk, ctx, bodies, params) {
var value = dust.helpers.tap(params.value, chunk, ctx);
return chunk.write(moment().subtract(Number(value), 'seconds').format('YYYY-MM-DD HH:mm:ss'));
},
'helper_usage_bar' : function (chunk, ctx, bodies, params) {
var value = dust.helpers.tap(params.value, chunk, ctx);
var v = Number(value);
var r = null;
if (v < 70) {
r = 'progress-bar-success';
} else if (v < 85) {
r = 'progress-bar-warning';
} else {
r = "progress-bar-danger";
}
return chunk.write(r);
},
};
function workaround(r) {
function node_map_to_array(nodes) {
var res = [];
for (var n in nodes) {
var p = nodes[n];
p.name = n;
res.push(p);
}
return res;
}
function augment_live_nodes(nodes) {
for (var i = 0, e = nodes.length; i < e; ++i) {
var n = nodes[i];
n.usedPercentage = Math.round((n.used + n.nonDfsUsedSpace) * 1.0 / n.capacity * 100);
if (n.adminState === "In Service") {
n.state = "alive";
} else if (nodes[i].adminState === "Decommission In Progress") {
n.state = "decommisioning";
} else if (nodes[i].adminState === "Decommissioned") {
n.state = "decommissioned";
}
}
}
function augment_dead_nodes(nodes) {
for (var i = 0, e = nodes.length; i < e; ++i) {
if (nodes[i].decommissioned) {
nodes[i].state = "down-decommissioned";
} else {
nodes[i].state = "down";
}
}
}
r.LiveNodes = node_map_to_array(JSON.parse(r.LiveNodes));
augment_live_nodes(r.LiveNodes);
r.DeadNodes = node_map_to_array(JSON.parse(r.DeadNodes));
augment_dead_nodes(r.DeadNodes);
r.DecomNodes = node_map_to_array(JSON.parse(r.DecomNodes));
return r;
}
$.get(
'/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
var base = dust.makeBase(HELPERS);
dust.render('datanode-info', base.push(data), function(err, out) {
$('#tab-datanode').html(out);
$('#table-datanodes').dataTable( {
'lengthMenu': [ [25, 50, 100, -1], [25, 50, 100, "All"] ],
'columns': [
{ 'orderDataType': 'ng-value', 'searchable': true },
{ 'orderDataType': 'ng-value', 'type': 'numeric' },
{ 'orderDataType': 'ng-value', 'type': 'numeric' },
{ 'orderDataType': 'ng-value', 'type': 'numeric'}
]});
$('#ui-tabs a[href="#tab-datanode"]').tab('show');
});
})).error(ajax_error_handler);
}
function load_mount_table() {
var HELPERS = {};
function workaround(resource) {
resource.MountTable = JSON.parse(resource.MountTable);
return resource;
}
$.get(
'/jmx?qry=Hadoop:service=Router,name=FederationState',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
var base = dust.makeBase(HELPERS);
dust.render('mounttable', base.push(data), function(err, out) {
$('#tab-mounttable').html(out);
$('#ui-tabs a[href="#tab-mounttable"]').tab('show');
});
})).error(ajax_error_handler);
}
function toTitleCase(str) {
return str.replace(/\w\S*/g, function(txt){
return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();
});
}
function show_err_msg(msg) {
$('#alert-panel-body').html(msg);
$('#alert-panel').show();
}
function ajax_error_handler(url, jqxhr, text, err) {
show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
}
function guard_with_startup_progress(fn) {
return function() {
try {
fn.apply(this, arguments);
} catch (err) {
if (err instanceof TypeError) {
show_err_msg('Router error: ' + err);
}
}
};
}
function load_page() {
var hash = window.location.hash;
switch(hash) {
case "#tab-mounttable":
load_mount_table();
break;
case "#tab-namenode":
load_namenode_info();
break;
case "#tab-datanode":
load_datanode_info();
break;
case "#tab-overview":
load_overview();
break;
default:
window.location.hash = "tab-overview";
break;
}
}
load_page();
$(window).bind('hashchange', function () {
load_page();
});
})();
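The page's only data source is Hadoop's standard /jmx servlet, so the same JSON can be fetched outside a browser. A hedged sketch (JmxFetchSketch is an illustrative name; the host:port comes from dfs.federation.router.http-address):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class JmxFetchSketch {
  public static String fetch(String routerHttpAddress) throws Exception {
    // Same endpoint and query string the page polls.
    URL url = new URL("http://" + routerHttpAddress
        + "/jmx?qry=Hadoop:service=Router,name=FederationState");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    StringBuilder sb = new StringBuilder();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        sb.append(line).append('\n');
      }
    }
    return sb.toString();
  }
}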

File: index.html (new)

@@ -0,0 +1,24 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="REFRESH" content="0;url=federationhealth.html" />
<title>Hadoop Administration</title>
</head>
</html>

File: RouterConfigBuilder.java (test)

@@ -29,6 +29,7 @@ public class RouterConfigBuilder {
private boolean enableRpcServer = false;
private boolean enableAdminServer = false;
private boolean enableHttpServer = false;
private boolean enableHeartbeat = false;
private boolean enableLocalHeartbeat = false;
private boolean enableStateStore = false;
@@ -45,6 +46,7 @@
public RouterConfigBuilder all() {
this.enableRpcServer = true;
this.enableAdminServer = true;
this.enableHttpServer = true;
this.enableHeartbeat = true;
this.enableLocalHeartbeat = true;
this.enableStateStore = true;
@@ -67,6 +69,11 @@
return this;
}
public RouterConfigBuilder http(boolean enable) {
this.enableHttpServer = enable;
return this;
}
public RouterConfigBuilder heartbeat(boolean enable) {
this.enableHeartbeat = enable;
return this;
@@ -90,6 +97,10 @@
return this.admin(true);
}
public RouterConfigBuilder http() {
return this.http(true);
}
public RouterConfigBuilder heartbeat() {
return this.heartbeat(true);
}
@@ -108,6 +119,8 @@
conf.setBoolean(DFSConfigKeys.DFS_ROUTER_RPC_ENABLE, this.enableRpcServer);
conf.setBoolean(DFSConfigKeys.DFS_ROUTER_ADMIN_ENABLE,
this.enableAdminServer);
conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HTTP_ENABLE,
this.enableHttpServer);
conf.setBoolean(DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
this.enableHeartbeat);
conf.setBoolean(DFSConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE,

File: RouterDFSCluster.java (test)

@@ -33,6 +33,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY;
@@ -134,6 +137,7 @@ public class RouterDFSCluster {
private String nameserviceId;
private String namenodeId;
private int rpcPort;
private int httpPort;
private DFSClient client;
private Configuration conf;
private RouterClient adminClient;
@@ -164,6 +168,10 @@
return this.rpcPort;
}
public int getHttpPort() {
return this.httpPort;
}
public FileContext getFileContext() {
return this.fileContext;
}
@@ -183,6 +191,10 @@
this.fileContext = null;
}
}
InetSocketAddress httpAddress = router.getHttpServerAddress();
if (httpAddress != null) {
this.httpPort = httpAddress.getPort();
}
}
public FileSystem getFileSystem() throws IOException {
@@ -399,16 +411,21 @@
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + "." + suffix,
"127.0.0.1:" + context.rpcPort);
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY + "." + suffix,
"127.0.0.1:" + context.httpPort);
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY + "." + suffix,
"0.0.0.0");
// If the service port is enabled by default, we need to set them up
boolean servicePortEnabled = false;
if (servicePortEnabled) {
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + "." + suffix,
"127.0.0.1:" + context.servicePort);
conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY + "." + suffix,
"0.0.0.0");
}
}
}
if (this.namenodeOverrides != null) {
conf.addResource(this.namenodeOverrides);
@@ -447,6 +464,10 @@
conf.set(DFS_ROUTER_ADMIN_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_ROUTER_ADMIN_BIND_HOST_KEY, "0.0.0.0");
conf.set(DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_ROUTER_HTTPS_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_ROUTER_HTTP_BIND_HOST_KEY, "0.0.0.0");
conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, nameservices.get(0));
conf.setLong(DFS_ROUTER_HEARTBEAT_INTERVAL_MS, heartbeatInterval);
conf.setLong(DFS_ROUTER_CACHE_TIME_TO_LIVE_MS, cacheFlushInterval);

File: TestMetricsBase.java

@@ -64,6 +64,7 @@ public class TestMetricsBase {
routerConfig = new RouterConfigBuilder()
.stateStore()
.metrics()
.http()
.build();
router = new Router();
router.init(routerConfig);

File: TestRouter.java

@@ -24,6 +24,7 @@ import java.io.IOException;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.federation.MockResolver;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
@@ -61,10 +62,13 @@ public class TestRouter {
conf.set(DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY, "0.0.0.0");
conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY, "0.0.0.0");
// Simulate a co-located NN
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns0");
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + "ns0");
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + "ns0",
"127.0.0.1:0" + 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY + "." + "ns0",
@@ -104,6 +108,9 @@
// Admin only
testRouterStartup(new RouterConfigBuilder(conf).admin().build());
// Http only
testRouterStartup(new RouterConfigBuilder(conf).http().build());
// Rpc only
testRouterStartup(new RouterConfigBuilder(conf).rpc().build());