From 01cd616d170d5d26a539e51e731e8e73b789b360 Mon Sep 17 00:00:00 2001
From: Todd Lipcon <todd@apache.org>
Date: Tue, 26 Jul 2011 00:04:30 +0000
Subject: [PATCH] HDFS-2180. Refactor NameNode HTTP server into new class.
 Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1150960 13f79535-47bb-0310-9956-ffa450edef68
---
 hdfs/CHANGES.txt                                   |   2 +
 .../apache/hadoop/hdfsproxy/HdfsProxy.java         |   3 +-
 .../hdfsproxy/ProxyFileDataServlet.java            |   3 +-
 .../hadoop/hdfs/server/common/JspHelper.java       |   6 +-
 .../hdfs/server/namenode/BackupNode.java           |   4 +-
 .../CancelDelegationTokenServlet.java              |   7 +-
 .../hdfs/server/namenode/Checkpointer.java         |   5 -
 .../hdfs/server/namenode/DfsServlet.java           |   7 +-
 .../server/namenode/FileChecksumServlets.java      |   7 +-
 .../hdfs/server/namenode/FileDataServlet.java      |  10 +-
 .../hdfs/server/namenode/FsckServlet.java          |   9 +-
 .../namenode/GetDelegationTokenServlet.java        |   6 +-
 .../hdfs/server/namenode/GetImageServlet.java      |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java      | 138 ++----------
 .../server/namenode/NameNodeHttpServer.java        | 201 ++++++++++++++++++
 .../server/namenode/NamenodeJspHelper.java         |   6 +-
 .../namenode/RenewDelegationTokenServlet.java      |   5 +-
 .../hdfs/server/common/TestJspHelper.java          |   6 +-
 hdfs/src/webapps/hdfs/block_info_xml.jsp           |   2 +-
 hdfs/src/webapps/hdfs/corrupt_files.jsp            |   2 +-
 .../src/webapps/hdfs/corrupt_replicas_xml.jsp      |   2 +-
 hdfs/src/webapps/hdfs/dfshealth.jsp                |   2 +-
 hdfs/src/webapps/hdfs/dfsnodelist.jsp              |   2 +-
 23 files changed, 267 insertions(+), 170 deletions(-)
 create mode 100644 hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt
index e501d1d97c2..3070cd90700 100644
--- a/hdfs/CHANGES.txt
+++ b/hdfs/CHANGES.txt
@@ -599,6 +599,8 @@ Trunk (unreleased changes)
     HDFS-2144. If SNN shuts down during initialization it does not log the
     cause. (Ravi Prakash via atm)
 
+    HDFS-2180. Refactor NameNode HTTP server into new class. (todd)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
index d9939137782..1837adb1239 100644
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
+++ b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 
 /**
  * A HTTPS/SSL proxy to HDFS, implementing certificate based access control.
@@ -70,7 +71,7 @@ public class HdfsProxy {
     this.server = new ProxyHttpServer(sslAddr, sslConf);
     this.server.setAttribute("proxy.https.port", server.getPort());
-    this.server.setAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY, nnAddr);
+    this.server.setAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY, nnAddr);
     this.server.setAttribute(JspHelper.CURRENT_CONF, new HdfsConfiguration());
     this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
     this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
index a9e2ac4dfc7..c9c8abd0837 100644
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
+++ b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** {@inheritDoc} */
@@ -47,7 +48,7 @@ public class ProxyFileDataServlet extends FileDataServlet {
       dtParam=JspHelper.getDelegationTokenUrlParam(dt);
     }
     InetSocketAddress nnAddress = (InetSocketAddress) getServletContext()
-        .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+        .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
     String nnHostPort = nnAddress == null ? null : NameNode
         .getHostPortString(nnAddress);
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS,
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index d12a2ff399e..e96ce9ec642 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -487,8 +487,8 @@ public class JspHelper {
     if (namenodeAddressInUrl != null) {
       namenodeAddress = DFSUtil.getSocketAddress(namenodeAddressInUrl);
     } else if (context != null) {
-      namenodeAddress = (InetSocketAddress) context
-          .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+      namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
+          context);
     }
     if (namenodeAddress != null) {
       return (namenodeAddress.getAddress().getHostAddress() + ":"
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 906d869682d..2e7f48f2843 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -111,10 +111,10 @@ public class BackupNode extends NameNode {
     String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
-  
+
   @Override // NameNode
   protected void setHttpServerAddress(Configuration conf){
-    conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(httpAddress));
+    conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(getHttpAddress()));
   }
 
   @Override // NameNode
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
index f8f2ecca1ee..e4de6345b63 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
@@ -46,8 +45,7 @@ public class CancelDelegationTokenServlet extends DfsServlet {
       throws ServletException, IOException {
     final UserGroupInformation ugi;
     final ServletContext context = getServletContext();
-    final Configuration conf =
-      (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
+    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
     try {
       ugi = getUGI(req, conf);
     } catch(IOException ioe) {
@@ -57,7 +55,8 @@ public class CancelDelegationTokenServlet extends DfsServlet {
                      "Unable to identify or authenticate user");
       return;
     }
-    final NameNode nn = (NameNode) context.getAttribute("name.node");
+    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
+        context);
     String tokenString = req.getParameter(TOKEN);
     if (tokenString == null) {
       resp.sendError(HttpServletResponse.SC_MULTIPLE_CHOICES,
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index f2f9c8e61c0..ee5e87c9aa8 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -101,11 +101,6 @@ class Checkpointer extends Daemon {
     String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
                                    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
     infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
-    
-    HttpServer httpServer = backupNode.httpServer;
-    httpServer.setAttribute("name.system.image", getFSImage());
-    httpServer.setAttribute("name.conf", conf);
-    httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
 
     LOG.info("Checkpoint Period : " + checkpointPeriod + " secs " +
              "(" + checkpointPeriod/60 + " min)");
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
index e7bbd2042ed..cee3e8d3a5e 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
@@ -75,13 +75,14 @@ abstract class DfsServlet extends HttpServlet {
     ServletContext context = getServletContext();
     // if we are running in the Name Node, use it directly rather than via
    // rpc
-    NameNode nn = (NameNode) context.getAttribute("name.node");
+    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
     if (nn != null) {
       return nn;
     }
-    InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
+    InetSocketAddress nnAddr =
+      NameNodeHttpServer.getNameNodeAddressFromContext(context);
     Configuration conf = new HdfsConfiguration(
-        (Configuration)context.getAttribute(JspHelper.CURRENT_CONF));
+        NameNodeHttpServer.getConfFromContext(context));
     return DFSUtil.createNamenode(nnAddr, conf);
   }
 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index 17d30103b34..7fc7edbdb24 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
 import org.apache.hadoop.net.NetUtils;
@@ -57,10 +56,10 @@ public class FileChecksumServlets {
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
       final ServletContext context = getServletContext();
-      final Configuration conf =
-        (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
+      final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
       final UserGroupInformation ugi = getUGI(request, conf);
-      final NameNode namenode = (NameNode)context.getAttribute("name.node");
+      final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
+          context);
       final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
       try {
         final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode,
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
index 0b9f6d3ea2a..9573caa6a62 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
@@ -65,7 +65,8 @@ public class FileDataServlet extends DfsServlet {
     }
 
     // Add namenode address to the url params
-    NameNode nn = (NameNode)getServletContext().getAttribute("name.node");
+    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
+        getServletContext());
     String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
@@ -85,7 +86,8 @@ public class FileDataServlet extends DfsServlet {
       throws IOException {
     if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
       // pick a random datanode
-      NameNode nn = (NameNode)getServletContext().getAttribute("name.node");
+      NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
+          getServletContext());
       return NamenodeJspHelper.getRandomDatanode(nn);
     }
     return JspHelper.bestNode(blks);
@@ -101,8 +103,8 @@ public class FileDataServlet extends DfsServlet {
   public void doGet(final HttpServletRequest request,
       final HttpServletResponse response)
       throws IOException {
-    final Configuration conf =
-      (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
+    final Configuration conf = NameNodeHttpServer.getConfFromContext(
+        getServletContext());
     final UserGroupInformation ugi = getUGI(request, conf);
     try {
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
index 95b4e6594de..96ccad6d422 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
@@ -30,7 +30,6 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
@@ -49,17 +48,15 @@ public class FsckServlet extends DfsServlet {
     final PrintWriter out = response.getWriter();
     final InetAddress remoteAddress = 
       InetAddress.getByName(request.getRemoteAddr());
-    final Configuration conf = 
-      (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
+    final ServletContext context = getServletContext();
+    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
 
     final UserGroupInformation ugi = getUGI(request, conf);
     try {
       ugi.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
-          final ServletContext context = getServletContext();
-
-          NameNode nn = (NameNode) context.getAttribute("name.node");
+          NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
 
           final FSNamesystem namesystem = nn.getNamesystem();
           final int totalDatanodes = 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
index bc41b2e7cbe..2c0f81abc5a 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -49,8 +48,7 @@ public class GetDelegationTokenServlet extends DfsServlet {
       throws ServletException, IOException {
     final UserGroupInformation ugi;
     final ServletContext context = getServletContext();
-    final Configuration conf = 
-      (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
+    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
     try {
       ugi = getUGI(req, conf);
     } catch(IOException ioe) {
@@ -61,7 +59,7 @@ public class GetDelegationTokenServlet extends DfsServlet {
       return;
     }
     LOG.info("Sending token: {" + ugi.getUserName() + "," + req.getRemoteAddr() +"}");
-    final NameNode nn = (NameNode) context.getAttribute("name.node");
+    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
     String renewer = req.getParameter(RENEWER);
     final String renewerFinal = (renewer == null) ?
         req.getUserPrincipal().getName() : renewer;
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
index 40a0cded801..04bfe845a43 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
@@ -57,7 +57,7 @@ public class GetImageServlet extends HttpServlet {
     Map<String,String[]> pmap = request.getParameterMap();
     try {
       ServletContext context = getServletContext();
-      final FSImage nnImage = (FSImage)context.getAttribute("name.system.image");
+      final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
       final TransferFsImage ff = new TransferFsImage(pmap, request, response);
       final Configuration conf = 
         (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index e8293c470ba..8505189f6cd 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -21,7 +21,6 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -67,7 +66,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -81,7 +79,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
@@ -95,7 +92,6 @@ import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
@@ -208,8 +204,6 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
 
-  public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
-
   protected FSNamesystem namesystem; 
   protected NamenodeRole role;
   /** RPC server. Package-protected for use in tests. */
@@ -225,9 +219,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   /** RPC server for DN address */
   protected InetSocketAddress serviceRPCAddress = null;
   /** httpServer */
-  protected HttpServer httpServer;
-  /** HTTP server address */
-  protected InetSocketAddress httpAddress = null;
+  protected NameNodeHttpServer httpServer;
   private Thread emptier;
   /** only used for testing purposes  */
   protected boolean stopRequested = false;
@@ -372,9 +364,10 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     return  NetUtils.createSocketAddr(
         conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
   }
-  
-  protected void setHttpServerAddress(Configuration conf){
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, getHostPortString(httpAddress));
+
+  protected void setHttpServerAddress(Configuration conf) {
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        getHostPortString(getHttpAddress()));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -388,11 +381,20 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   NamenodeRegistration setRegistration() {
     nodeRegistration = new NamenodeRegistration(
         getHostPortString(rpcAddress),
-        getHostPortString(httpAddress),
+        getHostPortString(getHttpAddress()),
         getFSImage().getStorage(), getRole(), getFSImage().getStorage().getCheckpointTime());
     return nodeRegistration;
   }
 
+  /**
+   * Login as the configured user for the NameNode.
+   */
+  void loginAsNameNodeUser(Configuration conf) throws IOException {
+    InetSocketAddress socAddr = getRpcServerAddress(conf);
+    SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+  }
+
   /**
    * Initialize name-node.
    * 
@@ -401,8 +403,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   protected void initialize(Configuration conf) throws IOException {
     InetSocketAddress socAddr = getRpcServerAddress(conf);
     UserGroupInformation.setConfiguration(conf);
-    SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, 
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+    loginAsNameNodeUser(conf);
     int handlerCount = 
       conf.getInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 
                   DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT);
@@ -514,108 +515,9 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   }
 
   private void startHttpServer(final Configuration conf) throws IOException {
-    final InetSocketAddress infoSocAddr = getHttpServerAddress(conf);
-    final String infoHost = infoSocAddr.getHostName();
-    if(UserGroupInformation.isSecurityEnabled()) {
-      String httpsUser = SecurityUtil.getServerPrincipal(conf
-          .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
-      if (httpsUser == null) {
-        LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
-            + " not defined in config. Starting http server as "
-            + SecurityUtil.getServerPrincipal(conf
-                .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), rpcAddress
-                .getHostName())
-            + ": Kerberized SSL may be not function correctly.");
-      } else {
-        // Kerberized SSL servers must be run from the host principal...
- LOG.info("Logging in as " + httpsUser + " to start http server."); - SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, - DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost); - } - } - UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - try { - this.httpServer = ugi.doAs(new PrivilegedExceptionAction() { - @Override - public HttpServer run() throws IOException, InterruptedException { - int infoPort = infoSocAddr.getPort(); - httpServer = new HttpServer("hdfs", infoHost, infoPort, - infoPort == 0, conf, - new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))); - - boolean certSSL = conf.getBoolean("dfs.https.enable", false); - boolean useKrb = UserGroupInformation.isSecurityEnabled(); - if (certSSL || useKrb) { - boolean needClientAuth = conf.getBoolean( - DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, - DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT); - InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf - .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, - DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)); - Configuration sslConf = new HdfsConfiguration(false); - if (certSSL) { - sslConf.addResource(conf.get( - "dfs.https.server.keystore.resource", "ssl-server.xml")); - } - httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, - useKrb); - // assume same ssl port for all datanodes - InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf - .get("dfs.datanode.https.address", infoHost + ":" + 50475)); - httpServer.setAttribute("datanode.https.port", datanodeSslPort - .getPort()); - } - httpServer.setAttribute("name.node", NameNode.this); - httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY, - getNameNodeAddress()); - httpServer.setAttribute("name.system.image", getFSImage()); - httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); - httpServer.addInternalServlet("getDelegationToken", - GetDelegationTokenServlet.PATH_SPEC, - GetDelegationTokenServlet.class, true); - httpServer.addInternalServlet("renewDelegationToken", - RenewDelegationTokenServlet.PATH_SPEC, - RenewDelegationTokenServlet.class, true); - httpServer.addInternalServlet("cancelDelegationToken", - CancelDelegationTokenServlet.PATH_SPEC, - CancelDelegationTokenServlet.class, true); - httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, - true); - httpServer.addInternalServlet("getimage", "/getimage", - GetImageServlet.class, true); - httpServer.addInternalServlet("listPaths", "/listPaths/*", - ListPathsServlet.class, false); - httpServer.addInternalServlet("data", "/data/*", - FileDataServlet.class, false); - httpServer.addInternalServlet("checksum", "/fileChecksum/*", - FileChecksumServlets.RedirectServlet.class, false); - httpServer.addInternalServlet("contentSummary", "/contentSummary/*", - ContentSummaryServlet.class, false); - httpServer.start(); - - // The web-server port can be ephemeral... 
ensure we have the correct - // info - infoPort = httpServer.getPort(); - httpAddress = new InetSocketAddress(infoHost, infoPort); - setHttpServerAddress(conf); - LOG.info(getRole() + " Web-server up at: " + httpAddress); - return httpServer; - } - }); - } catch (InterruptedException e) { - throw new IOException(e); - } finally { - if(UserGroupInformation.isSecurityEnabled() && - conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) { - // Go back to being the correct Namenode principal - LOG.info("Logging back in as " - + SecurityUtil.getServerPrincipal(conf - .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), rpcAddress - .getHostName()) + " following http server start."); - SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, - DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, rpcAddress.getHostName()); - } - } + httpServer = new NameNodeHttpServer(conf, this, getHttpServerAddress(conf)); + httpServer.start(); + setHttpServerAddress(conf); } /** @@ -1420,7 +1322,7 @@ public class NameNode implements NamenodeProtocols, FSConstants { * @return the http address. */ public InetSocketAddress getHttpAddress() { - return httpAddress; + return httpServer.getHttpAddress(); } /** diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java new file mode 100644 index 00000000000..fc911fd944b --- /dev/null +++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; + +import javax.servlet.ServletContext; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Encapsulates the HTTP server started by the NameNode. 
+ */
+@InterfaceAudience.Private
+public class NameNodeHttpServer {
+  private HttpServer httpServer;
+  private final Configuration conf;
+  private final NameNode nn;
+
+  private final Log LOG = NameNode.LOG;
+  private InetSocketAddress httpAddress;
+
+  private InetSocketAddress bindAddress;
+
+
+  public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
+  public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
+  protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
+
+  public NameNodeHttpServer(
+      Configuration conf,
+      NameNode nn,
+      InetSocketAddress bindAddress) {
+    this.conf = conf;
+    this.nn = nn;
+    this.bindAddress = bindAddress;
+  }
+
+  private String getDefaultServerPrincipal() throws IOException {
+    return SecurityUtil.getServerPrincipal(
+        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
+        nn.getNameNodeAddress().getHostName());
+  }
+
+  public void start() throws IOException {
+    final String infoHost = bindAddress.getHostName();
+
+    if(UserGroupInformation.isSecurityEnabled()) {
+      String httpsUser = SecurityUtil.getServerPrincipal(conf
+          .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
+      if (httpsUser == null) {
+        LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
+            + " not defined in config. Starting http server as "
+            + getDefaultServerPrincipal()
+            + ": Kerberized SSL may be not function correctly.");
+      } else {
+        // Kerberized SSL servers must be run from the host principal...
+        LOG.info("Logging in as " + httpsUser + " to start http server.");
+        SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
+            DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
+      }
+    }
+
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+    try {
+      this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
+        @Override
+        public HttpServer run() throws IOException, InterruptedException {
+          int infoPort = bindAddress.getPort();
+          httpServer = new HttpServer("hdfs", infoHost, infoPort,
+              infoPort == 0, conf,
+              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " ")));
+
+          boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+          boolean useKrb = UserGroupInformation.isSecurityEnabled();
+          if (certSSL || useKrb) {
+            boolean needClientAuth = conf.getBoolean(
+                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
+            InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
+                .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+                    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
+            Configuration sslConf = new HdfsConfiguration(false);
+            if (certSSL) {
+              sslConf.addResource(conf.get(
+                  "dfs.https.server.keystore.resource", "ssl-server.xml"));
+            }
+            httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
+                useKrb);
+            // assume same ssl port for all datanodes
+            InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf
+                .get("dfs.datanode.https.address", infoHost + ":" + 50475));
+            httpServer.setAttribute("datanode.https.port", datanodeSslPort
+                .getPort());
+          }
+          httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
+          httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
+              nn.getNameNodeAddress());
+          httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
+          httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+          setupServlets(httpServer);
+          httpServer.start();
+
+          // The web-server port can be ephemeral... ensure we have the correct
+          // info
+          infoPort = httpServer.getPort();
+          httpAddress = new InetSocketAddress(infoHost, infoPort);
+          LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
+          return httpServer;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    } finally {
+      if(UserGroupInformation.isSecurityEnabled() && 
+          conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
+        // Go back to being the correct Namenode principal
+        LOG.info("Logging back in as NameNode user following http server start");
+        nn.loginAsNameNodeUser(conf);
+      }
+    }
+  }
+
+  public void stop() throws Exception {
+    httpServer.stop();
+  }
+
+  public InetSocketAddress getHttpAddress() {
+    return httpAddress;
+  }
+
+  private static void setupServlets(HttpServer httpServer) {
+    httpServer.addInternalServlet("getDelegationToken",
+        GetDelegationTokenServlet.PATH_SPEC,
+        GetDelegationTokenServlet.class, true);
+    httpServer.addInternalServlet("renewDelegationToken",
+        RenewDelegationTokenServlet.PATH_SPEC,
+        RenewDelegationTokenServlet.class, true);
+    httpServer.addInternalServlet("cancelDelegationToken",
+        CancelDelegationTokenServlet.PATH_SPEC,
+        CancelDelegationTokenServlet.class, true);
+    httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
+        true);
+    httpServer.addInternalServlet("getimage", "/getimage",
+        GetImageServlet.class, true);
+    httpServer.addInternalServlet("listPaths", "/listPaths/*",
+        ListPathsServlet.class, false);
+    httpServer.addInternalServlet("data", "/data/*",
+        FileDataServlet.class, false);
+    httpServer.addInternalServlet("checksum", "/fileChecksum/*",
+        FileChecksumServlets.RedirectServlet.class, false);
+    httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
+        ContentSummaryServlet.class, false);
+  }
+
+  public static FSImage getFsImageFromContext(ServletContext context) {
+    return (FSImage)context.getAttribute(FSIMAGE_ATTRIBUTE_KEY);
+  }
+
+  public static NameNode getNameNodeFromContext(ServletContext context) {
+    return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY);
+  }
+
+  public static Configuration getConfFromContext(ServletContext context) {
+    return (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
+  }
+
+  public static InetSocketAddress getNameNodeAddressFromContext(
+      ServletContext context) {
+    return (InetSocketAddress)context.getAttribute(
+        NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+  }
+}
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index e2a1f158cf6..d28e3307d90 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -385,7 +385,7 @@ class NamenodeJspHelper {
   static void redirectToRandomDataNode(ServletContext context,
       HttpServletRequest request, HttpServletResponse resp) throws IOException,
       InterruptedException {
-    final NameNode nn = (NameNode) context.getAttribute("name.node");
+    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
     final Configuration conf = (Configuration) context
         .getAttribute(JspHelper.CURRENT_CONF);
     final DatanodeID datanode = getRandomDatanode(nn);
@@ -566,12 +566,12 @@ class NamenodeJspHelper {
         HttpServletRequest request) throws IOException {
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-      final NameNode nn = (NameNode)context.getAttribute("name.node");
+      final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
       nn.getNamesystem().DFSNodesStatus(live, dead);
       nn.getNamesystem().removeDecomNodeFromList(live);
       nn.getNamesystem().removeDecomNodeFromList(dead);
       InetSocketAddress nnSocketAddress = (InetSocketAddress) context
-          .getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
+          .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
       String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
           + nnSocketAddress.getPort();
 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
index 4bfeb25459d..5e2041cd383 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
@@ -47,8 +47,7 @@ public class RenewDelegationTokenServlet extends DfsServlet {
       throws ServletException, IOException {
     final UserGroupInformation ugi;
     final ServletContext context = getServletContext();
-    final Configuration conf =
-      (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
+    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
     try {
       ugi = getUGI(req, conf);
     } catch(IOException ioe) {
@@ -58,7 +57,7 @@ public class RenewDelegationTokenServlet extends DfsServlet {
                      "Unable to identify or authenticate user");
       return;
     }
-    final NameNode nn = (NameNode) context.getAttribute("name.node");
+    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
     String tokenString = req.getParameter(TOKEN);
     if (tokenString == null) {
       resp.sendError(HttpServletResponse.SC_MULTIPLE_CHOICES,
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
index bf01a505dbd..eda9f61e53d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -93,7 +93,7 @@ public class TestJspHelper {
     //Set the nnaddr url parameter to null.
     when(request.getParameter(JspHelper.NAMENODE_ADDRESS)).thenReturn(null);
     InetSocketAddress addr = new InetSocketAddress("localhost", 2222);
-    when(context.getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
+    when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
         .thenReturn(addr);
     verifyServiceInToken(context, request, addr.getAddress().getHostAddress()
         + ":2222");
@@ -102,7 +102,7 @@ public class TestJspHelper {
     token.setService(new Text("3.3.3.3:3333"));
     tokenString = token.encodeToUrlString();
     //Set the name.node.address attribute in Servlet context to null
-    when(context.getAttribute(NameNode.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
+    when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
         .thenReturn(null);
     when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
         tokenString);
diff --git a/hdfs/src/webapps/hdfs/block_info_xml.jsp b/hdfs/src/webapps/hdfs/block_info_xml.jsp
index b3cb978f033..0fd0ba97a63 100644
--- a/hdfs/src/webapps/hdfs/block_info_xml.jsp
+++ b/hdfs/src/webapps/hdfs/block_info_xml.jsp
@@ -68,7 +68,7 @@
 private static final long serialVersionUID = 1L;
 %>
 <%
-NameNode nn = (NameNode)application.getAttribute("name.node");
+NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
 String namenodeRole = nn.getRole().toString();
 FSNamesystem fsn = nn.getNamesystem();
 
diff --git a/hdfs/src/webapps/hdfs/corrupt_files.jsp b/hdfs/src/webapps/hdfs/corrupt_files.jsp
index 734978135bc..519aa01819a 100644
--- a/hdfs/src/webapps/hdfs/corrupt_files.jsp
+++ b/hdfs/src/webapps/hdfs/corrupt_files.jsp
@@ -28,7 +28,7 @@
 <%!//for java.io.Serializable
   private static final long serialVersionUID = 1L;%>
 <%
-  NameNode nn = (NameNode) application.getAttribute("name.node");
+  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
   FSNamesystem fsn = nn.getNamesystem();
   String namenodeRole = nn.getRole().toString();
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
diff --git a/hdfs/src/webapps/hdfs/corrupt_replicas_xml.jsp b/hdfs/src/webapps/hdfs/corrupt_replicas_xml.jsp
index 699661541d4..32386a24635 100644
--- a/hdfs/src/webapps/hdfs/corrupt_replicas_xml.jsp
+++ b/hdfs/src/webapps/hdfs/corrupt_replicas_xml.jsp
@@ -62,7 +62,7 @@
 
 %>
 <%
-  NameNode nn = (NameNode)application.getAttribute("name.node");
+  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
   FSNamesystem fsn = nn.getNamesystem();
 
   Integer numCorruptBlocks = 10;
diff --git a/hdfs/src/webapps/hdfs/dfshealth.jsp b/hdfs/src/webapps/hdfs/dfshealth.jsp
index caa0534b621..d14808e5504 100644
--- a/hdfs/src/webapps/hdfs/dfshealth.jsp
+++ b/hdfs/src/webapps/hdfs/dfshealth.jsp
@@ -27,7 +27,7 @@
 %>
 <%
   final NamenodeJspHelper.HealthJsp healthjsp = new NamenodeJspHelper.HealthJsp();
-  NameNode nn = (NameNode)application.getAttribute("name.node");
+  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
   FSNamesystem fsn = nn.getNamesystem();
   String namenodeRole = nn.getRole().toString();
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
diff --git a/hdfs/src/webapps/hdfs/dfsnodelist.jsp b/hdfs/src/webapps/hdfs/dfsnodelist.jsp
index be2f9390dfc..9b4d0c32ea4 100644
--- a/hdfs/src/webapps/hdfs/dfsnodelist.jsp
+++ b/hdfs/src/webapps/hdfs/dfsnodelist.jsp
@@ -27,7 +27,7 @@
 %>
 <%
 final NamenodeJspHelper.NodeListJsp nodelistjsp = new NamenodeJspHelper.NodeListJsp();
-NameNode nn = (NameNode)application.getAttribute("name.node");
+NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
 String namenodeRole = nn.getRole().toString();
 FSNamesystem fsn = nn.getNamesystem();
 String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
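
For illustration, not part of the commit: the refactoring gives servlets and JSPs one typed entry point to the state that NameNodeHttpServer.start() publishes into the servlet context (the NameNode itself, its RPC address, its FSImage, and the active Configuration). A minimal sketch of a servlet written against the new accessors follows. It assumes registration on the NameNode's HttpServer via addInternalServlet; the class name MyInfoServlet and its output format are hypothetical, while every NameNodeHttpServer and NameNode call is taken from the patch above.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.io.PrintWriter;
import java.net.InetSocketAddress;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.conf.Configuration;

/**
 * Hypothetical servlet illustrating the accessor pattern introduced by
 * HDFS-2180. It only works inside the NameNode web application, after
 * NameNodeHttpServer.start() has populated the context attributes.
 */
public class MyInfoServlet extends HttpServlet {
  private static final long serialVersionUID = 1L;

  @Override
  public void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    final ServletContext context = getServletContext();

    // Typed lookups replace the old string-keyed casts such as
    // (NameNode) context.getAttribute("name.node").
    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
    final InetSocketAddress nnAddr =
        NameNodeHttpServer.getNameNodeAddressFromContext(context);

    final PrintWriter out = resp.getWriter();
    out.println("role = " + nn.getRole());
    out.println("rpc address = " + NameNode.getHostPortString(nnAddr));
    out.println("conf loaded = " + (conf != null));
  }
}

Centralizing the attribute keys and casts this way means a renamed key is a one-file change in NameNodeHttpServer rather than an edit to every servlet and JSP.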