svn merge -c 1530669 merging from trunk to branch-2 to fix HDFS-4510.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1530672 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2013-10-09 15:12:29 +00:00
parent 2031005eeb
commit 39d1443e3f
3 changed files with 245 additions and 20 deletions

View File

@ -62,6 +62,9 @@ Release 2.3.0 - UNRELEASED
HDFS-4512. Cover package org.apache.hadoop.hdfs.server.common with tests.
(Vadim Bondarev via kihwal)
HDFS-4510. Cover classes ClusterJspHelper/NamenodeJspHelper with unit
tests. (Andrey Klochkov via kihwal)
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.ClusterJspHelper.ClusterStatus;
import org.apache.hadoop.hdfs.server.namenode.ClusterJspHelper.DecommissionStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Verifies that {@link ClusterJspHelper} can generate cluster health and
 * decommissioning reports against a live mini cluster.
 */
public class TestClusterJspHelper {

  private MiniDFSCluster cluster;
  private Configuration conf;

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitClusterUp();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test(timeout = 15000)
  public void testClusterJspHelperReports() {
    ClusterJspHelper helper = new ClusterJspHelper();

    // Both report generators must return a non-null status object.
    ClusterStatus health = helper.generateClusterHealthReport();
    assertNotNull("testClusterJspHelperReports ClusterStatus is null", health);

    DecommissionStatus decommission = helper.generateDecommissioningReport();
    assertNotNull("testClusterJspHelperReports DecommissionStatus is null",
        decommission);
  }
}

View File

@ -17,18 +17,22 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_EDITS;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_FSIMAGE;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAFEMODE;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAVING_CHECKPOINT;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.ServletContext;
@ -40,30 +44,44 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.apache.hadoop.util.VersionInfo;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.znerd.xmlenc.XMLOutputter;
import com.google.common.collect.ImmutableSet;
public class TestNameNodeJspHelper {
private MiniDFSCluster cluster = null;
Configuration conf = null;
private static final int DATA_NODES_AMOUNT = 2;
@Before
public void setUp() throws Exception {
private static MiniDFSCluster cluster;
private static Configuration conf;
private static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
@BeforeClass
public static void setUp() throws Exception {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(DATA_NODES_AMOUNT).build();
cluster.waitClusterUp();
}
@After
public void tearDown() throws Exception {
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null)
cluster.shutdown();
}
@ -75,18 +93,18 @@ public class TestNameNodeJspHelper {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
conf, ugi);
//tokenString returned must be null because security is disabled
// tokenString returned must be null because security is disabled
Assert.assertEquals(null, tokenString);
}
@Test
public void tesSecurityModeText() {
public void testSecurityModeText() {
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
String securityOnOff = NamenodeJspHelper.getSecurityModeText();
Assert.assertTrue("security mode doesn't match. Should be ON",
securityOnOff.contains("ON"));
//Security is enabled
// Security is enabled
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
UserGroupInformation.setConfiguration(conf);
@ -206,4 +224,149 @@ public class TestNameNodeJspHelper {
}
return false;
}
/**
 * Checks that {@link NamenodeJspHelper#getRandomDatanode} only ever returns
 * a datanode that is actually registered with the cluster.
 *
 * Fix: the original initialized {@code set} with {@code ImmutableSet.of()}
 * and immediately overwrote it with {@code builder.build()} — a dead store.
 */
@Test(timeout = 15000)
public void testGetRandomDatanode() {
  NameNode nameNode = cluster.getNameNode();
  // Collect the display names of every datanode in the mini cluster.
  ImmutableSet.Builder<String> builder = ImmutableSet.builder();
  for (DataNode dataNode : cluster.getDataNodes()) {
    builder.add(dataNode.getDisplayName());
  }
  ImmutableSet<String> set = builder.build();
  // Repeated random draws must always land inside the known datanode set.
  for (int i = 0; i < 10; i++) {
    DatanodeDescriptor dnDescriptor = NamenodeJspHelper
        .getRandomDatanode(nameNode);
    assertTrue("testGetRandomDatanode error",
        set.contains(dnDescriptor.toString()));
  }
}
/**
 * Exercises {@link NamenodeJspHelper#redirectToRandomDataNode} with mocked
 * servlet objects and verifies the redirect URL points at the directory
 * browsing JSP.
 */
@Test(timeout = 15000)
public void testNamenodeJspHelperRedirectToRandomDataNode()
    throws IOException, InterruptedException {
  final String urlPart = "browseDirectory.jsp?namenodeInfoPort=";
  ServletContext context = mock(ServletContext.class);
  HttpServletRequest request = mock(HttpServletRequest.class);
  HttpServletResponse response = mock(HttpServletResponse.class);

  when(request.getScheme()).thenReturn("http");
  when(request.getParameter(UserParam.NAME)).thenReturn("localuser");
  when(context.getAttribute(NAMENODE_ATTRIBUTE_KEY)).thenReturn(
      cluster.getNameNode());
  when(context.getAttribute(JspHelper.CURRENT_CONF)).thenReturn(conf);

  // Capture the URL passed to sendRedirect() instead of performing it.
  ArgumentCaptor<String> redirectUrl = ArgumentCaptor.forClass(String.class);
  doAnswer(new Answer<String>() {
    @Override
    public String answer(InvocationOnMock invocation) throws Throwable {
      return null;
    }
  }).when(response).sendRedirect(redirectUrl.capture());

  NamenodeJspHelper.redirectToRandomDataNode(context, request, response);
  assertTrue(redirectUrl.getValue().contains(urlPart));
}
/**
 * Regex per datanode status, used to locate the "Live Datanodes"/"Dead
 * Datanodes" count digit inside the generated node-list output.
 *
 * Fix: {@code pattern} is assigned once in the constructor and never
 * mutated, so it is declared {@code final}.
 */
private enum DataNodeStatus {
  LIVE("[Live Datanodes(| +):(| +)]\\d"),
  DEAD("[Dead Datanodes(| +):(| +)]\\d");

  // Compiled once per enum constant.
  private final Pattern pattern;

  DataNodeStatus(String line) {
    this.pattern = Pattern.compile(line);
  }

  public Pattern getPattern() {
    return pattern;
  }
}
/**
 * Asserts that the DatanodeManager reports exactly {@code deadCount} dead
 * and {@code lifeCount} live datanodes.
 */
private void checkDeadLiveNodes(NameNode nameNode, int deadCount,
    int lifeCount) {
  DatanodeManager dm = nameNode.getNamesystem().getBlockManager()
      .getDatanodeManager();
  List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
  List<DatanodeDescriptor> deadNodes = new ArrayList<DatanodeDescriptor>();
  dm.fetchDatanodes(liveNodes, deadNodes, true);
  boolean countsMatch =
      (liveNodes.size() == lifeCount) && deadNodes.size() == deadCount;
  assertTrue("checkDeadLiveNodes error !!!", countsMatch);
}
/**
 * Verifies that NodeListJsp renders the expected live/dead datanode counts
 * for a healthy mini cluster (all nodes live, none dead).
 *
 * Fix: use the shared {@code NAMENODE_ATTRIBUTE_KEY} constant instead of
 * repeating the "name.node" string literal, consistent with the other tests
 * in this class.
 */
@Test(timeout = 15000)
public void testNodeListJspGenerateNodesList() throws IOException {
  String output;
  NameNode nameNode = cluster.getNameNode();
  ServletContext context = mock(ServletContext.class);
  when(context.getAttribute(NAMENODE_ATTRIBUTE_KEY)).thenReturn(nameNode);
  when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
      .thenReturn(cluster.getNameNode().getHttpAddress());
  // Precondition: every datanode is live, none dead.
  checkDeadLiveNodes(nameNode, 0, DATA_NODES_AMOUNT);
  output = getOutputFromGeneratedNodesList(context, DataNodeStatus.LIVE);
  assertCounts(DataNodeStatus.LIVE, output, DATA_NODES_AMOUNT);
  output = getOutputFromGeneratedNodesList(context, DataNodeStatus.DEAD);
  assertCounts(DataNodeStatus.DEAD, output, 0);
}
/**
 * Extracts the datanode count matched by {@code dataNodeStatus}'s pattern
 * from {@code output} and asserts it equals {@code expectedCount}.
 *
 * Bug fix: the matcher was previously built from
 * {@code DataNodeStatus.LIVE.getPattern()} regardless of the parameter, so
 * the DEAD case silently re-checked the live count instead of the dead one.
 */
private void assertCounts(DataNodeStatus dataNodeStatus, String output,
    int expectedCount) {
  Matcher matcher = dataNodeStatus.getPattern().matcher(output);
  if (matcher.find()) {
    String digitLine = output.substring(matcher.start(), matcher.end())
        .trim();
    // parseInt avoids the needless boxing of Integer.valueOf(...).
    assertTrue("assertCounts error. actual != expected",
        Integer.parseInt(digitLine) == expectedCount);
  } else {
    fail("assertCount matcher error");
  }
}
/**
 * Runs NodeListJsp.generateNodesList() against a mocked JspWriter and
 * returns everything the JSP printed as one string.
 */
private String getOutputFromGeneratedNodesList(ServletContext context,
    DataNodeStatus dnStatus) throws IOException {
  JspWriter out = mock(JspWriter.class);
  ArgumentCaptor<String> printedText = ArgumentCaptor.forClass(String.class);
  NamenodeJspHelper.NodeListJsp nodelistjsp =
      new NamenodeJspHelper.NodeListJsp();
  // Accumulate every print() argument instead of writing it anywhere.
  final StringBuffer buffer = new StringBuffer();
  doAnswer(new Answer<String>() {
    @Override
    public String answer(InvocationOnMock invocation) {
      buffer.append((String) invocation.getArguments()[0]);
      return null;
    }
  }).when(out).print(printedText.capture());

  HttpServletRequest request = mock(HttpServletRequest.class);
  when(request.getScheme()).thenReturn("http");
  when(request.getParameter("whatNodes")).thenReturn(dnStatus.name());
  nodelistjsp.generateNodesList(context, out, request);
  return buffer.toString();
}
/**
 * Checks that getInodeLimitText() reports the expected resource-usage
 * headings for the namesystem.
 */
@Test(timeout = 15000)
public void testGetInodeLimitText() {
  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  String line = NamenodeJspHelper.getInodeLimitText(fsn);
  ImmutableSet<String> patterns = ImmutableSet.of("files and directories",
      "Heap Memory used", "Non Heap Memory used");
  for (String pattern : patterns) {
    assertTrue("testInodeLimitText error " + pattern,
        line.contains(pattern));
  }
}
/**
 * Checks that the version table contains the build information plus the
 * cluster and block-pool identifiers.
 */
@Test(timeout = 15000)
public void testGetVersionTable() {
  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  String line = NamenodeJspHelper.getVersionTable(fsn);
  ImmutableSet<String> patterns = ImmutableSet.of(VersionInfo.getVersion(),
      VersionInfo.getRevision(), VersionInfo.getUser(),
      VersionInfo.getBranch(), fsn.getClusterId(), fsn.getBlockPoolId());
  for (String pattern : patterns) {
    assertTrue("testGetVersionTable error " + pattern,
        line.contains(pattern));
  }
}
}