HDFS-4129. Add utility methods to dump NameNode in memory tree for testing. Contributed by Tsz Wo (Nicholas), SZE.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1403956 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-10-31 02:33:44 +00:00
parent cd0d206bb8
commit 07e0d7730d
6 changed files with 236 additions and 16 deletions
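For orientation, a minimal sketch (not part of this commit) of how the new hooks fit together. The cluster setup mirrors the new TestFSDirectory test below; the class name DumpTreeSketch and the /foo/bar path are hypothetical, and the file must live in the org.apache.hadoop.hdfs.server.namenode package because INode is package-private:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DumpTreeSketch {
  public static void main(String[] args) throws Exception {
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).build();
    try {
      cluster.waitActive();
      cluster.getFileSystem().mkdirs(new Path("/foo/bar")); // something to dump
      final FSNamesystem fsn = cluster.getNamesystem();
      final FSDirectory fsdir = fsn.getFSDirectory(); // accessor added by this commit
      final INode root = fsdir.getINode("/");         // root of the in-memory tree
      System.out.println(root.dumpTreeRecursively()); // new @VisibleForTesting dump
    } finally {
      cluster.shutdown();
    }
  }
}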

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -148,6 +148,9 @@ Trunk (Unreleased)
HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
returning more than INode array. (Jing Zhao via suresh)
HDFS-4129. Add utility methods to dump NameNode in memory tree for
testing. (szetszwo via suresh)
OPTIMIZATIONS
BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -5453,6 +5453,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public BlockManager getBlockManager() {
return blockManager;
}
/** @return the FSDirectory. */
public FSDirectory getFSDirectory() {
return dir;
}
/**
* Verifies that the given identifier and password are valid and match.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.SignedBytes;
/**
@@ -225,11 +228,10 @@ abstract class INode implements Comparable<byte[]> {
abstract DirCounts spaceConsumedInTree(DirCounts counts);
/**
* Get local file name
-  * @return local file name
+  * @return null if the local name is null; otherwise, return the local name.
*/
String getLocalName() {
-  return DFSUtil.bytes2String(name);
+  return name == null? null: DFSUtil.bytes2String(name);
}
@@ -239,8 +241,8 @@ abstract class INode implements Comparable<byte[]> {
}
/**
* Get local file name
-  * @return local file name
+  * @return null if the local name is null;
+  *         otherwise, return the local name byte array.
*/
byte[] getLocalNameBytes() {
return name;
@@ -458,4 +460,30 @@ abstract class INode implements Comparable<byte[]> {
return new INodeFile(permissions, blocks, replication,
modificationTime, atime, preferredBlockSize);
}
/**
* Dump the subtree starting from this inode.
* @return a text representation of the tree.
*/
@VisibleForTesting
public StringBuffer dumpTreeRecursively() {
final StringWriter out = new StringWriter();
dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder());
return out.getBuffer();
}
/**
* Dump tree recursively.
* @param prefix The prefix string that each line should print.
*/
@VisibleForTesting
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
out.print(prefix);
out.print(" ");
out.print(getLocalName());
out.print(" (");
final String s = super.toString();
out.print(s.substring(s.lastIndexOf(getClass().getSimpleName())));
out.println(")");
}
}
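A side note on the substring above: Object.toString() returns "<fully.qualified.ClassName>@<hex identity hash>", so cutting at the last occurrence of the simple class name leaves just "INodeFile@78392d4", the form shown in the sample tree in INodeDirectory below. A tiny standalone illustration (the nested INodeFile here is a hypothetical stand-in, not the HDFS class):

public class ToStringTrickSketch {
  static class INodeFile {} // hypothetical stand-in, not the HDFS class

  public static void main(String[] args) {
    final Object node = new INodeFile();
    // e.g. "ToStringTrickSketch$INodeFile@1b6d3586"
    final String s = node.toString();
    // Keep everything from the simple class name onward: "INodeFile@1b6d3586".
    System.out.println(
        s.substring(s.lastIndexOf(node.getClass().getSimpleName())));
  }
}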

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -29,6 +30,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import com.google.common.annotations.VisibleForTesting;
/**
* Directory INode class.
*/
@@ -460,4 +463,52 @@ class INodeDirectory extends INode {
return inodes;
}
}
/*
 * The following code is to dump the tree recursively for testing.
 *
 * \- foo (INodeDirectory@33dd2717)
 *   \- sub1 (INodeDirectory@442172)
 *     +- file1 (INodeFile@78392d4)
 *     +- file2 (INodeFile@78392d5)
 *     +- sub11 (INodeDirectory@8400cff)
 *       \- file3 (INodeFile@78392d6)
 *     \- z_file4 (INodeFile@45848712)
 */
static final String DUMPTREE_EXCEPT_LAST_ITEM = "+-";
static final String DUMPTREE_LAST_ITEM = "\\-";
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
super.dumpTreeRecursively(out, prefix);
if (prefix.length() >= 2) {
prefix.setLength(prefix.length() - 2);
prefix.append("  ");
}
dumpTreeRecursively(out, prefix, children);
}
/**
* Dump the given subtrees.
* @param prefix The prefix string that each line should print.
* @param subs The subtrees.
*/
@VisibleForTesting
protected static void dumpTreeRecursively(PrintWriter out,
StringBuilder prefix, List<? extends INode> subs) {
prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
if (subs != null && subs.size() != 0) {
int i = 0;
for(; i < subs.size() - 1; i++) {
subs.get(i).dumpTreeRecursively(out, prefix);
prefix.setLength(prefix.length() - 2);
prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
}
prefix.setLength(prefix.length() - 2);
prefix.append(DUMPTREE_LAST_ITEM);
subs.get(i).dumpTreeRecursively(out, prefix);
}
prefix.setLength(prefix.length() - 2);
}
}
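The recursion above treats the prefix StringBuilder as a stack: a directory first swaps its own two-character branch marker for spaces so its children indent underneath it, and the static helper pushes "+-" (or "\-" for the last child) before each child and pops it afterwards. A self-contained sketch of the same technique, using a hypothetical Node class and a compact loop rather than a line-for-line copy of the HDFS code:

import java.io.PrintWriter;
import java.util.Arrays;
import java.util.List;

public class TreeDumpSketch {
  static class Node {
    final String name;
    final List<Node> children;
    Node(String name, Node... children) {
      this.name = name;
      this.children = Arrays.asList(children);
    }
    void dump(PrintWriter out, StringBuilder prefix) {
      out.println(prefix + " " + name);
      if (prefix.length() >= 2) {
        prefix.setLength(prefix.length() - 2);
        prefix.append("  "); // this node's marker becomes spacer columns
      }
      for (int i = 0; i < children.size(); i++) {
        prefix.append(i < children.size() - 1 ? "+-" : "\\-"); // push marker
        children.get(i).dump(out, prefix);
        prefix.setLength(prefix.length() - 2); // pop whatever the child left
      }
    }
  }

  public static void main(String[] args) {
    final Node root = new Node("foo",
        new Node("sub1", new Node("file1"), new Node("file2")));
    root.dump(new PrintWriter(System.out, true), new StringBuilder("\\-"));
  }
}

Run as-is, the sketch prints:

\- foo
  \- sub1
    +- file1
    \- file2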

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java

@@ -0,0 +1,141 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.BufferedReader;
import java.io.StringReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Test {@link FSDirectory}, the in-memory namespace tree.
*/
public class TestFSDirectory {
public static final Log LOG = LogFactory.getLog(TestFSDirectory.class);
private static final long seed = 0;
private static final short REPLICATION = 3;
private final Path dir = new Path("/" + getClass().getSimpleName());
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
private final Path file2 = new Path(sub1, "file2");
private final Path sub11 = new Path(sub1, "sub11");
private final Path file3 = new Path(sub11, "file3");
private final Path file4 = new Path(sub1, "z_file4");
private final Path file5 = new Path(sub1, "z_file5");
private final Path sub2 = new Path(dir, "sub2");
private Configuration conf;
private MiniDFSCluster cluster;
private FSNamesystem fsn;
private FSDirectory fsdir;
private DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
hdfs.mkdirs(sub2);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/** Dump the tree, make some changes, and then dump the tree again. */
@Test
public void testDumpTree() throws Exception {
final INode root = fsdir.getINode("/");
LOG.info("Original tree");
final StringBuffer b1 = root.dumpTreeRecursively();
System.out.println("b1=" + b1);
final BufferedReader in = new BufferedReader(new StringReader(b1.toString()));
String line = in.readLine();
checkClassName(line);
for(; (line = in.readLine()) != null; ) {
line = line.trim();
Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
|| line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
checkClassName(line);
}
LOG.info("Create a new file " + file4);
DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
final StringBuffer b2 = root.dumpTreeRecursively();
System.out.println("b2=" + b2);
int i = 0;
int j = b1.length() - 1;
for(; b1.charAt(i) == b2.charAt(i); i++);
int k = b2.length() - 1;
for(; b1.charAt(j) == b2.charAt(k); j--, k--);
final String diff = b2.substring(i, k + 1);
System.out.println("i=" + i + ", j=" + j + ", k=" + k);
System.out.println("diff=" + diff);
Assert.assertTrue(i > j);
Assert.assertTrue(diff.contains(file4.getName()));
}
static void checkClassName(String line) {
int i = line.lastIndexOf('(');
int j = line.lastIndexOf('@');
final String classname = line.substring(i+1, j);
Assert.assertTrue(classname.equals(INodeFile.class.getSimpleName())
|| classname.equals(INodeDirectory.class.getSimpleName())
|| classname.equals(INodeDirectoryWithQuota.class.getSimpleName()));
}
}
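The i/j/k scan in testDumpTree is a longest-common-prefix/suffix diff: i stops at the first mismatch from the front, j and k walk back in step from the ends of b1 and b2, and i > j certifies that every character of b1 falls inside the shared prefix or suffix, i.e. the change was a pure insertion, so b2.substring(i, k + 1) is the inserted text. The same trick on plain strings (hypothetical inputs, not from the patch; like the test, the loops assume the two strings really differ somewhere in the middle):

public class PrefixSuffixDiffSketch {
  public static void main(String[] args) {
    final String b1 = "abc def";     // before
    final String b2 = "abc XYZ def"; // after
    int i = 0;
    for (; b1.charAt(i) == b2.charAt(i); i++);      // longest common prefix
    int j = b1.length() - 1;
    int k = b2.length() - 1;
    for (; b1.charAt(j) == b2.charAt(k); j--, k--); // longest common suffix
    // i > j: b1 is covered entirely by prefix + suffix, a pure insertion.
    System.out.println("i=" + i + ", j=" + j + ", k=" + k
        + ", inserted=\"" + b2.substring(i, k + 1) + "\""); // inserted="XYZ"
  }
}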

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
- import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -60,17 +59,11 @@ public class TestFsLimits {
return fsn;
}
-  private static class TestFSDirectory extends FSDirectory {
-    public TestFSDirectory() throws IOException {
+  private static class MockFSDirectory extends FSDirectory {
+    public MockFSDirectory() throws IOException {
super(new FSImage(conf), getMockNamesystem(), conf);
setReady(fsIsReady);
}
-  @Override
-  public <T extends INode> void verifyFsLimits(INode[] pathComponents,
-      int pos, T child) throws FSLimitException {
-    super.verifyFsLimits(pathComponents, pos, child);
-  }
}
@Before
@@ -157,7 +150,7 @@ public class TestFsLimits {
private void addChildWithName(String name, Class<?> expected)
throws Exception {
// have to create after the caller has had a chance to set conf values
-    if (fs == null) fs = new TestFSDirectory();
+    if (fs == null) fs = new MockFSDirectory();
INode child = new INodeDirectory(name, perms);
child.setLocalName(name);