HDFS-4129. Add utility methods to dump NameNode in memory tree for testing. Contributed by Tsz Wo (Nicholas), SZE.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1403956 13f79535-47bb-0310-9956-ffa450edef68
parent cd0d206bb8
commit 07e0d7730d
@@ -148,6 +148,9 @@ Trunk (Unreleased)
     HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
     returning more than INode array. (Jing Zhao via suresh)
 
+    HDFS-4129. Add utility methods to dump NameNode in memory tree for
+    testing. (szetszwo via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -5453,7 +5453,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   public BlockManager getBlockManager() {
     return blockManager;
   }
+  /** @return the FSDirectory. */
+  public FSDirectory getFSDirectory() {
+    return dir;
+  }
 
   /**
    * Verifies that the given identifier and password are valid and match.
    * @param identifier Token identifier.
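The new accessor is what lets a test reach the in-memory namespace tree of a live NameNode. A minimal sketch of the call chain, mirroring the setUp() of the TestFSDirectory test added further down (the mini-cluster wiring here is shown only for illustration):

    // Sketch: reach the FSDirectory of a running MiniDFSCluster.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();   // accessor added above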
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -32,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.SignedBytes;
 
 /**
@@ -225,11 +228,10 @@ abstract class INode implements Comparable<byte[]> {
   abstract DirCounts spaceConsumedInTree(DirCounts counts);
 
   /**
-   * Get local file name
-   * @return local file name
+   * @return null if the local name is null; otherwise, return the local name.
    */
   String getLocalName() {
-    return DFSUtil.bytes2String(name);
+    return name == null? null: DFSUtil.bytes2String(name);
   }
@@ -239,8 +241,8 @@ abstract class INode implements Comparable<byte[]> {
   }
 
   /**
-   * Get local file name
-   * @return local file name
+   * @return null if the local name is null;
+   *         otherwise, return the local name byte array.
    */
   byte[] getLocalNameBytes() {
     return name;
@@ -458,4 +460,30 @@ abstract class INode implements Comparable<byte[]> {
     return new INodeFile(permissions, blocks, replication,
         modificationTime, atime, preferredBlockSize);
   }
+
+  /**
+   * Dump the subtree starting from this inode.
+   * @return a text representation of the tree.
+   */
+  @VisibleForTesting
+  public StringBuffer dumpTreeRecursively() {
+    final StringWriter out = new StringWriter();
+    dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder());
+    return out.getBuffer();
+  }
+
+  /**
+   * Dump tree recursively.
+   * @param prefix The prefix string that each line should print.
+   */
+  @VisibleForTesting
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+    out.print(prefix);
+    out.print(" ");
+    out.print(getLocalName());
+    out.print(" (");
+    final String s = super.toString();
+    out.print(s.substring(s.lastIndexOf(getClass().getSimpleName())));
+    out.println(")");
+  }
 }
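Each inode thus prints as one line: the accumulated prefix, a space, its local name, and the trailing "(ClassName@hashcode)" part of Object.toString(). A hedged, self-contained sketch of driving the new dump from a mini-cluster; it would have to live in the org.apache.hadoop.hdfs.server.namenode package, as the new test does, because INode and FSDirectory are package-private, and the class name DumpTreeExample is made up for illustration:

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative driver, not part of the patch.
    public class DumpTreeExample {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).build();
        try {
          cluster.waitActive();
          FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
          INode root = fsdir.getINode("/");
          // The no-argument overload buffers the PrintWriter output for callers.
          StringBuffer b = root.dumpTreeRecursively();
          System.out.println(b);
        } finally {
          cluster.shutdown();
        }
      }
    }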
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -29,6 +30,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Directory INode class.
  */
@@ -460,4 +463,52 @@ class INodeDirectory extends INode {
       return inodes;
     }
   }
+
+  /*
+   * The following code is to dump the tree recursively for testing.
+   *
+   *      \- foo   (INodeDirectory@33dd2717)
+   *        \- sub1   (INodeDirectory@442172)
+   *          +- file1   (INodeFile@78392d4)
+   *          +- file2   (INodeFile@78392d5)
+   *          +- sub11   (INodeDirectory@8400cff)
+   *            \- file3   (INodeFile@78392d6)
+   *          \- z_file4   (INodeFile@45848712)
+   */
+  static final String DUMPTREE_EXCEPT_LAST_ITEM = "+-";
+  static final String DUMPTREE_LAST_ITEM = "\\-";
+  @VisibleForTesting
+  @Override
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
+    super.dumpTreeRecursively(out, prefix);
+    if (prefix.length() >= 2) {
+      prefix.setLength(prefix.length() - 2);
+      prefix.append("  ");
+    }
+    dumpTreeRecursively(out, prefix, children);
+  }
+
+  /**
+   * Dump the given subtrees.
+   * @param prefix The prefix string that each line should print.
+   * @param subs The subtrees.
+   */
+  @VisibleForTesting
+  protected static void dumpTreeRecursively(PrintWriter out,
+      StringBuilder prefix, List<? extends INode> subs) {
+    prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+    if (subs != null && subs.size() != 0) {
+      int i = 0;
+      for(; i < subs.size() - 1; i++) {
+        subs.get(i).dumpTreeRecursively(out, prefix);
+        prefix.setLength(prefix.length() - 2);
+        prefix.append(DUMPTREE_EXCEPT_LAST_ITEM);
+      }
+
+      prefix.setLength(prefix.length() - 2);
+      prefix.append(DUMPTREE_LAST_ITEM);
+      subs.get(i).dumpTreeRecursively(out, prefix);
+    }
+    prefix.setLength(prefix.length() - 2);
+  }
 }
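The helpers above render children by mutating one shared prefix in place: each child is printed under a "+-" marker, the last child under "\-", and the two marker characters are stripped again once a subtree has been emitted so sibling branches line up. A standalone sketch of the same prefix-swapping recursion on a toy node type (everything in it is illustrative only, not HDFS code):

    import java.io.PrintWriter;
    import java.util.Arrays;
    import java.util.List;

    // Illustrative only: the same "+-"/"\-" prefix-swapping recursion,
    // applied to a toy node type instead of HDFS inodes.
    class ToyNode {
      final String name;
      final List<ToyNode> children;
      ToyNode(String name, ToyNode... children) {
        this.name = name;
        this.children = Arrays.asList(children);
      }

      void dump(PrintWriter out, StringBuilder prefix) {
        out.println(prefix + " " + name);
        if (prefix.length() >= 2) {              // indent children two spaces
          prefix.setLength(prefix.length() - 2);
          prefix.append("  ");
        }
        prefix.append("+-");
        if (!children.isEmpty()) {
          int i = 0;
          for (; i < children.size() - 1; i++) { // all but the last child
            children.get(i).dump(out, prefix);
            prefix.setLength(prefix.length() - 2);
            prefix.append("+-");
          }
          prefix.setLength(prefix.length() - 2); // last child gets "\-"
          prefix.append("\\-");
          children.get(i).dump(out, prefix);
        }
        prefix.setLength(prefix.length() - 2);   // restore the caller's prefix
      }

      public static void main(String[] args) {
        ToyNode root = new ToyNode("sub1",
            new ToyNode("file1"), new ToyNode("file2"),
            new ToyNode("sub11", new ToyNode("file3")));
        root.dump(new PrintWriter(System.out, true), new StringBuilder("\\-"));
      }
    }

Run as-is, the sketch prints a small tree in the same "+-"/"\-" layout shown in the comment block above.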
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.BufferedReader;
+import java.io.StringReader;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test {@link FSDirectory}, the in-memory namespace tree.
+ */
+public class TestFSDirectory {
+  public static final Log LOG = LogFactory.getLog(TestFSDirectory.class);
+
+  private static final long seed = 0;
+  private static final short REPLICATION = 3;
+
+  private final Path dir = new Path("/" + getClass().getSimpleName());
+
+  private final Path sub1 = new Path(dir, "sub1");
+  private final Path file1 = new Path(sub1, "file1");
+  private final Path file2 = new Path(sub1, "file2");
+
+  private final Path sub11 = new Path(sub1, "sub11");
+  private final Path file3 = new Path(sub11, "file3");
+  private final Path file4 = new Path(sub1, "z_file4");
+  private final Path file5 = new Path(sub1, "z_file5");
+
+  private final Path sub2 = new Path(dir, "sub2");
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private FSNamesystem fsn;
+  private FSDirectory fsdir;
+
+  private DistributedFileSystem hdfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(REPLICATION)
+      .build();
+    cluster.waitActive();
+
+    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+
+    hdfs = cluster.getFileSystem();
+    DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
+
+    DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
+    hdfs.mkdirs(sub2);
+
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /** Dump the tree, make some changes, and then dump the tree again. */
+  @Test
+  public void testDumpTree() throws Exception {
+    final INode root = fsdir.getINode("/");
+
+    LOG.info("Original tree");
+    final StringBuffer b1 = root.dumpTreeRecursively();
+    System.out.println("b1=" + b1);
+
+    final BufferedReader in = new BufferedReader(new StringReader(b1.toString()));
+
+    String line = in.readLine();
+    checkClassName(line);
+
+    for(; (line = in.readLine()) != null; ) {
+      line = line.trim();
+      Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+          || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
+      checkClassName(line);
+    }
+
+    LOG.info("Create a new file " + file4);
+    DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
+
+    final StringBuffer b2 = root.dumpTreeRecursively();
+    System.out.println("b2=" + b2);
+
+    int i = 0;
+    int j = b1.length() - 1;
+    for(; b1.charAt(i) == b2.charAt(i); i++);
+    int k = b2.length() - 1;
+    for(; b1.charAt(j) == b2.charAt(k); j--, k--);
+    final String diff = b2.substring(i, k + 1);
+    System.out.println("i=" + i + ", j=" + j + ", k=" + k);
+    System.out.println("diff=" + diff);
+    Assert.assertTrue(i > j);
+    Assert.assertTrue(diff.contains(file4.getName()));
+  }
+
+  static void checkClassName(String line) {
+    int i = line.lastIndexOf('(');
+    int j = line.lastIndexOf('@');
+    final String classname = line.substring(i+1, j);
+    Assert.assertTrue(classname.equals(INodeFile.class.getSimpleName())
+        || classname.equals(INodeDirectory.class.getSimpleName())
+        || classname.equals(INodeDirectoryWithQuota.class.getSimpleName()));
+  }
+}
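The interesting part of testDumpTree() is how it locates what changed between the two dumps: it advances i over the longest common prefix of b1 and b2, walks j and k back over the longest common suffix, and takes b2.substring(i, k + 1) as the changed region; asserting i > j then says that all of b1 is covered by that prefix and suffix, so the second dump only gained text. A tiny standalone sketch of the same idea on toy strings (with explicit bounds checks added, which the test itself omits because the two dumps are known to differ):

    // Illustrative only: common-prefix/common-suffix diff on two toy strings.
    public class PrefixSuffixDiffExample {
      public static void main(String[] args) {
        String b1 = "file2 sub11";
        String b2 = "file2 sub11 z_file4";
        int i = 0;
        for (; i < b1.length() && i < b2.length()
            && b1.charAt(i) == b2.charAt(i); i++);
        int j = b1.length() - 1, k = b2.length() - 1;
        for (; j >= i && k >= i && b1.charAt(j) == b2.charAt(k); j--, k--);
        // Prints: changed region = " z_file4"
        System.out.println("changed region = \"" + b2.substring(i, k + 1) + "\"");
      }
    }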
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -60,17 +59,11 @@ public class TestFsLimits {
     return fsn;
   }
 
-  private static class TestFSDirectory extends FSDirectory {
-    public TestFSDirectory() throws IOException {
+  private static class MockFSDirectory extends FSDirectory {
+    public MockFSDirectory() throws IOException {
       super(new FSImage(conf), getMockNamesystem(), conf);
       setReady(fsIsReady);
     }
-
-    @Override
-    public <T extends INode> void verifyFsLimits(INode[] pathComponents,
-        int pos, T child) throws FSLimitException {
-      super.verifyFsLimits(pathComponents, pos, child);
-    }
   }
 
   @Before
@@ -157,7 +150,7 @@ public class TestFsLimits {
   private void addChildWithName(String name, Class<?> expected)
       throws Exception {
     // have to create after the caller has had a chance to set conf values
-    if (fs == null) fs = new TestFSDirectory();
+    if (fs == null) fs = new MockFSDirectory();
 
     INode child = new INodeDirectory(name, perms);
     child.setLocalName(name);