HDFS-2845. SBN should not allow browsing of the file system via web UI. Contributed by Bikas Saha.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1238897 13f79535-47bb-0310-9956-ffa450edef68
parent 6be13332db
commit 43679fcccd
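The change follows one pattern across the NameNode status JSPs touched below: read the node's HA service state and emit the "Browse the filesystem" link only when the node is active. A minimal standalone sketch of that check, using the same APIs the JSPs call in the diff (the helper class and method name here are illustrative only, not part of the patch):

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Illustrative helper only: the JSPs in this commit perform the same check
// inline in a scriptlet before rendering the browse link.
final class BrowseLinkGate {

  private BrowseLinkGate() {
  }

  // True only when the given NameNode is currently in the ACTIVE HA state.
  static boolean shouldShowBrowseLink(NameNode nn) {
    HAServiceState nnHAState = nn.getServiceState();
    return nnHAState == HAServiceState.ACTIVE;
  }
}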
@@ -143,3 +143,5 @@ HDFS-2691. Fixes for pipeline recovery in an HA cluster: report RBW replicas imm
HDFS-2824. Fix failover when prior NN died just after creating an edit log segment. (atm via todd)

HDFS-2853. HA: NN fails to start if the shared edits dir is marked required (atm via eli)

HDFS-2845. SBN should not allow browsing of the file system via web UI. (Bikas Saha via atm)

@@ -23,6 +23,7 @@
import="org.apache.hadoop.fs.FileStatus"
import="org.apache.hadoop.fs.FileUtil"
import="org.apache.hadoop.fs.Path"
import="org.apache.hadoop.ha.HAServiceProtocol.HAServiceState"
import="java.util.Collection"
import="java.util.Arrays" %>
<%!//for java.io.Serializable

@@ -30,6 +31,8 @@
<%
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
FSNamesystem fsn = nn.getNamesystem();
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
String namenodeRole = nn.getRole().toString();
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
    + nn.getNameNodeAddress().getPort();

@@ -46,8 +49,10 @@
<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
<%=NamenodeJspHelper.getVersionTable(fsn)%>
<br>
<% if (isActive) { %>
<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b>
<br>
<% } %>
<b><a href="/logs/"><%=namenodeRole%> Logs</a></b>
<br>
<b><a href=/dfshealth.jsp> Go back to DFS home</a></b>

@@ -30,8 +30,10 @@
final NamenodeJspHelper.HealthJsp healthjsp = new NamenodeJspHelper.HealthJsp();
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
FSNamesystem fsn = nn.getNamesystem();
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
String namenodeRole = nn.getRole().toString();
String namenodeState = nn.getServiceState().toString();
String namenodeState = nnHAState.toString();
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
%>

@@ -45,7 +47,9 @@
<h1><%=namenodeRole%> '<%=namenodeLabel%>' (<%=namenodeState%>)</h1>
<%= NamenodeJspHelper.getVersionTable(fsn) %>
<br />
<% if (isActive) { %>
<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
<% } %>
<b><a href="/logs/"><%=namenodeRole%> Logs</a></b>

<hr>

@@ -20,6 +20,7 @@
<%@ page
contentType="text/html; charset=UTF-8"
import="org.apache.hadoop.util.ServletUtil"
import="org.apache.hadoop.ha.HAServiceProtocol.HAServiceState"
%>
<%!
//for java.io.Serializable

@@ -30,6 +31,8 @@ final NamenodeJspHelper.NodeListJsp nodelistjsp = new NamenodeJspHelper.NodeList
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(application);
String namenodeRole = nn.getRole().toString();
FSNamesystem fsn = nn.getNamesystem();
HAServiceState nnHAState = nn.getServiceState();
boolean isActive = (nnHAState == HAServiceState.ACTIVE);
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
%>

@@ -43,7 +46,9 @@ String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameN
<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
<%= NamenodeJspHelper.getVersionTable(fsn) %>
<br />
<% if (isActive) { %>
<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
<% } %>
<b><a href="/logs/"><%=namenodeRole%> Logs</a></b><br>
<b><a href=/dfshealth.jsp> Go back to DFS home</a></b>
<hr>

@@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.ha;

import static org.junit.Assert.*;

import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Test;

public class TestHAWebUI {

  /**
   * Tests that the web UI of the name node provides a link to browse the file
   * system only in active state
   */
  @Test
  public void testLinkToBrowseFilesystem() throws Exception {
    Configuration conf = new Configuration();

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    try {
      cluster.waitActive();

      cluster.transitionToActive(0);
      String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:"
          + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
          + "/dfshealth.jsp"));
      assertTrue(pageContents.contains("Browse the filesystem"));

      cluster.transitionToStandby(0);
      pageContents = DFSTestUtil.urlGet(new URL("http://localhost:"
          + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
          + "/dfshealth.jsp"));
      assertFalse(pageContents.contains("Browse the filesystem"));

      cluster.transitionToActive(0);
      pageContents = DFSTestUtil.urlGet(new URL("http://localhost:"
          + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
          + "/dfshealth.jsp"));
      assertTrue(pageContents.contains("Browse the filesystem"));

    } finally {
      cluster.shutdown();
    }
  }
}
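The test builds the same /dfshealth.jsp URL and fetches it three times while toggling the HA state of NameNode 0. A sketch of how that fetch could be factored into a private helper inside the test class above (the method name is hypothetical and not part of the patch; it reuses only the MiniDFSCluster, NameNode, DFSTestUtil, and URL APIs already imported by the test):

  // Hypothetical helper, not part of the patch: fetch /dfshealth.jsp from the
  // NameNode at the given index of the running MiniDFSCluster.
  private static String fetchDfsHealthPage(MiniDFSCluster cluster, int nnIndex)
      throws Exception {
    int httpPort = NameNode.getHttpAddress(cluster.getConfiguration(nnIndex))
        .getPort();
    return DFSTestUtil.urlGet(new URL("http://localhost:" + httpPort
        + "/dfshealth.jsp"));
  }

With such a helper, each assertion reduces to checking whether fetchDfsHealthPage(cluster, 0) contains the "Browse the filesystem" text.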