HDFS-4666. Define ".snapshot" as a reserved inode name so that users cannot create a file/directory with ".snapshot" as the name. If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade; otherwise, upgrade will fail.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1468238 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-15 21:42:06 +00:00
parent bf807063bc
commit d13f6ebe20
14 changed files with 437 additions and 426 deletions

CHANGES file (Branch-2802 Snapshot)

@@ -236,3 +236,8 @@ Branch-2802 Snapshot (Unreleased)
   szetszwo)
 
   HDFS-4692. Use timestamp as default snapshot names. (szetszwo)
+
+  HDFS-4666. Define ".snapshot" as a reserved inode name so that users cannot
+  create a file/directory with ".snapshot" as the name. If ".snapshot" is used
+  in a previous version of HDFS, it must be renamed before upgrade; otherwise,
+  upgrade will fail. (szetszwo)

FSLimitException.java

@@ -97,4 +97,15 @@ public abstract class FSLimitException extends QuotaExceededException {
           " is exceeded: limit=" + quota + " items=" + count;
     }
   }
+
+  /** The given name is illegal. */
+  public static final class IllegalNameException extends FSLimitException {
+    public static final long serialVersionUID = 1L;
+
+    public IllegalNameException() {}
+
+    public IllegalNameException(String msg) {
+      super(msg);
+    }
+  }
 }
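For context, a rough client-side sketch (not part of this patch) of what the new exception means for users; the path and the demo class are made up, and the exact exception wrapping over RPC is an assumption:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReservedNameDemo {
      // conf is assumed to point at a NameNode built from this branch.
      static void tryReservedMkdir(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        try {
          fs.mkdirs(new Path("/user/alice/.snapshot"));  // ".snapshot" is now reserved
        } catch (IOException e) {
          // The NameNode throws FSLimitException.IllegalNameException; on the client
          // it surfaces as an IOException whose message names ".snapshot" as reserved.
          System.err.println("rejected: " + e.getMessage());
        }
      }
    }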

HdfsConstants.java

@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /************************************
@@ -107,4 +109,10 @@ public class HdfsConstants {
    * A special path component contained in the path for a snapshot file/dir
    */
   public static final String DOT_SNAPSHOT_DIR = ".snapshot";
+
+  public static final byte[] DOT_SNAPSHOT_DIR_BYTES
+      = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
+
+  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
+      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
 }
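A small sketch of what the two new constants evaluate to, assuming DFSUtil.string2Bytes is the usual UTF-8 conversion used elsewhere in the codebase:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class DotSnapshotConstantsDemo {
      public static void main(String[] args) {
        // SEPARATOR_DOT_SNAPSHOT_DIR is simply "/" + ".snapshot".
        System.out.println(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);   // /.snapshot
        // DOT_SNAPSHOT_DIR_BYTES holds the encoded form of ".snapshot", ready for
        // byte-wise comparison against raw inode name components.
        System.out.println(Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES,
            ".snapshot".getBytes(StandardCharsets.UTF_8)));             // true
      }
    }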

LayoutVersion.java

@@ -98,7 +98,7 @@ public class LayoutVersion {
         "add OP_UPDATE_BLOCKS"),
     RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
     ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
-    SNAPSHOT(-43, -42, "Support for snapshot feature", false);
+    SNAPSHOT(-43, "Support for snapshot feature");
 
     final int lv;
     final int ancestorLV;
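The shorter entry leans on the two-argument Feature constructor, which in this enum defaults the ancestor layout version to lv + 1 and reserved to false; a paraphrase (not a verbatim copy of LayoutVersion.java):

    // Convenience constructor, paraphrased:
    Feature(final int lv, final String description) {
      this(lv, lv + 1, description, false);
    }

    // So the new form
    //   SNAPSHOT(-43, "Support for snapshot feature")
    // behaves like the old
    //   SNAPSHOT(-43, -42, "Support for snapshot feature", false)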

FSDirectory.java

@@ -23,6 +23,7 @@ import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
@@ -41,11 +42,12 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -1355,7 +1357,7 @@ public class FSDirectory implements Closeable {
     readLock();
     try {
-      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
         return getSnapshotsListing(srcs, startAfter);
       }
       final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
@@ -1393,10 +1395,10 @@
    */
   private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
       throws UnresolvedLinkException, IOException {
-    assert hasReadLock();
-    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
-    Preconditions.checkArgument(src.endsWith(dotSnapshot),
-        src + " does not end with " + dotSnapshot);
+    Preconditions.checkState(hasReadLock());
+    Preconditions.checkArgument(
+        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
 
     final String dirPath = normalizePath(src.substring(0,
         src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
@@ -1428,7 +1430,7 @@
     String srcs = normalizePath(src);
     readLock();
     try {
-      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
         return getFileInfo4DotSnapshot(srcs);
       }
       final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
@@ -1442,9 +1444,9 @@
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
-    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
-    Preconditions.checkArgument(src.endsWith(dotSnapshot),
-        src + " does not end with " + dotSnapshot);
+    Preconditions.checkArgument(
+        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
 
     final String dirPath = normalizePath(src.substring(0,
         src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
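A side note on the Preconditions changes above: besides reusing the new SEPARATOR_DOT_SNAPSHOT_DIR constant, the template form defers message construction, so the failure string is only built when the check actually fails, whereas the old code concatenated it on every call:

    // Old: message concatenated eagerly, even when the check passes.
    Preconditions.checkArgument(src.endsWith(dotSnapshot),
        src + " does not end with " + dotSnapshot);

    // New: Guava substitutes the %s arguments only if the check fails.
    Preconditions.checkArgument(
        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);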
@@ -1928,37 +1930,46 @@
         delta.get(Quota.DISKSPACE), src[i - 1]);
   }
 
-  /**
-   * Verify that filesystem limit constraints are not violated
-   */
-  void verifyFsLimits(INode[] pathComponents, int pos, INode child)
-      throws FSLimitException {
-    verifyMaxComponentLength(child.getLocalName(), pathComponents, pos);
-    verifyMaxDirItems(pathComponents, pos);
+  /** Verify if the snapshot name is legal. */
+  void verifySnapshotName(String snapshotName, String path)
+      throws PathComponentTooLongException, IllegalNameException {
+    final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
+    verifyINodeName(bytes);
+    verifyMaxComponentLength(bytes, path, 0);
+  }
+
+  /** Verify if the inode name is legal. */
+  void verifyINodeName(byte[] childName) throws IllegalNameException {
+    if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
+      String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
+      if (!ready) {
+        s += " Please rename it before upgrade.";
+      }
+      throw new IllegalNameException(s);
+    }
   }
 
   /**
    * Verify child's name for fs limit.
    * @throws PathComponentTooLongException child's name is too long.
    */
-  public void verifyMaxComponentLength(String childName,
-      Object parentPath, int pos) throws PathComponentTooLongException {
+  void verifyMaxComponentLength(byte[] childName, Object parentPath, int pos)
+      throws PathComponentTooLongException {
     if (maxComponentLength == 0) {
       return;
     }
-    final int length = childName.length();
+    final int length = childName.length;
     if (length > maxComponentLength) {
       final String p = parentPath instanceof INode[]?
           getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
       final PathComponentTooLongException e = new PathComponentTooLongException(
-          maxComponentLength, length, p, childName);
+          maxComponentLength, length, p, DFSUtil.bytes2String(childName));
       if (ready) {
         throw e;
       } else {
         // Do not throw if edits log is still being processed
-        NameNode.LOG.error("FSDirectory.verifyMaxComponentLength: "
-            + e.getLocalizedMessage());
+        NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
       }
     }
   }
@@ -1967,7 +1978,7 @@
    * Verify children size for fs limit.
    * @throws MaxDirectoryItemsExceededException too many children.
    */
-  private void verifyMaxDirItems(INode[] pathComponents, int pos)
+  void verifyMaxDirItems(INode[] pathComponents, int pos)
       throws MaxDirectoryItemsExceededException {
     if (maxDirItems == 0) {
       return;
@@ -2015,8 +2026,11 @@
     // original location becase a quota violation would cause the the item
     // to go "poof". The fs limits must be bypassed for the same reason.
     if (checkQuota) {
-      verifyFsLimits(inodes, pos, child);
+      verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);
+      verifyMaxDirItems(inodes, pos);
     }
+    // always verify inode name
+    verifyINodeName(child.getLocalNameBytes());
 
     final Quota.Counts counts = child.computeQuotaUsage();
     updateCount(iip, pos,
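A condensed view of the enforcement order in addChild after this change (paraphrased from the hunk above, not a verbatim copy): the fs-limit checks stay behind checkQuota, but the reserved-name check is unconditional, so it also runs while an old fsimage/edits log is being replayed, which is where the "Please rename it before upgrade" message comes from:

    if (checkQuota) {
      verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);  // fs limit
      verifyMaxDirItems(inodes, pos);                                    // fs limit
    }
    verifyINodeName(child.getLocalNameBytes());  // reserved-name check, never skipped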

FSImageFormat.java

@@ -565,6 +565,10 @@ public class FSImageFormat {
     INode loadINode(final byte[] localName, boolean isSnapshotINode,
         DataInput in) throws IOException {
       final int imgVersion = getLayoutVersion();
+      if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+        namesystem.getFSDirectory().verifyINodeName(localName);
+      }
+
       long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ?
           in.readLong() : namesystem.allocateNewInodeId();
 
@@ -903,7 +907,7 @@
    * actually leads to.
    * @return The snapshot path.
    */
-  private String computeSnapshotPath(String nonSnapshotPath,
+  private static String computeSnapshotPath(String nonSnapshotPath,
       Snapshot snapshot) {
     String snapshotParentFullPath = snapshot.getRoot().getParent()
         .getFullPathName();
@@ -911,10 +915,8 @@
     String relativePath = nonSnapshotPath.equals(snapshotParentFullPath) ?
         Path.SEPARATOR : nonSnapshotPath.substring(
             snapshotParentFullPath.length());
-    String snapshotFullPath = snapshotParentFullPath + Path.SEPARATOR
-        + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName
-        + relativePath;
-    return snapshotFullPath;
+    return Snapshot.getSnapshotPath(snapshotParentFullPath,
+        snapshotName + relativePath);
   }
 
   /**
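A worked example of the refactored return statement, with hypothetical values: if snapshotParentFullPath is "/dir", snapshotName is "s1", and nonSnapshotPath is "/dir/sub/f", then relativePath is "/sub/f" and the result matches what the old string concatenation produced:

    String p = Snapshot.getSnapshotPath("/dir", "s1" + "/sub/f");
    assert p.equals("/dir/.snapshot/s1/sub/f");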

FSNamesystem.java

@@ -5806,7 +5806,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (snapshotName == null || snapshotName.isEmpty()) {
         snapshotName = Snapshot.generateDefaultSnapshotName();
       }
-      dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
+      dir.verifySnapshotName(snapshotName, snapshotRoot);
       dir.writeLock();
       try {
         snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
@@ -5844,7 +5844,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             safeMode);
       }
       checkOwner(pc, path);
-      dir.verifyMaxComponentLength(snapshotNewName, path, 0);
+      dir.verifySnapshotName(snapshotNewName, path);
       snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
       getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@@ -5854,12 +5854,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path oldSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
-          + "/" + snapshotOldName);
-      Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
-          + "/" + snapshotNewName);
-      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot.toString(),
-          newSnapshotRoot.toString(), null);
+      String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
+      String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
+      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot, newSnapshotRoot, null);
     }
   }
@@ -5959,9 +5956,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync();
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
-          + Path.SEPARATOR + snapshotName);
-      logAuditEvent(true, "deleteSnapshot", rootPath.toString(), null, null);
+      String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
+      logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
     }
   }
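The audit-log strings themselves do not change with this cleanup; only how they are built does (the path and snapshot name here are made up):

    // Old: build an org.apache.hadoop.fs.Path, then call toString()
    new Path("/dir", HdfsConstants.DOT_SNAPSHOT_DIR + "/" + "s1").toString();
        // -> "/dir/.snapshot/s1"

    // New: build the String directly
    Snapshot.getSnapshotPath("/dir", "s1");
        // -> "/dir/.snapshot/s1", same value, no Path object needed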

INodeDirectory.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -511,8 +512,8 @@ public class INodeDirectory extends INodeWithAdditionalFields {
    * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
    */
   private static boolean isDotSnapshotDir(byte[] pathComponent) {
-    return pathComponent == null ? false : HdfsConstants.DOT_SNAPSHOT_DIR
-        .equalsIgnoreCase(DFSUtil.bytes2String(pathComponent));
+    return pathComponent == null ? false
+        : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
   }
 
   /**
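One behavioral consequence of the byte-wise check (a sketch, not part of the patch): the old String comparison was case-insensitive, while Arrays.equals is exact, so only the lower-case ".snapshot" component is treated as the snapshot directory:

    byte[] upper = DFSUtil.string2Bytes(".SNAPSHOT");
    // old check:
    HdfsConstants.DOT_SNAPSHOT_DIR.equalsIgnoreCase(DFSUtil.bytes2String(upper));  // true
    // new check:
    Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, upper);                    // false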

Snapshot.java

@@ -49,9 +49,16 @@ public class Snapshot implements Comparable<byte[]> {
     return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
   }
 
-  static String getSnapshotPath(String snapshottableDir, String snapshotName) {
-    return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
-        + Path.SEPARATOR + snapshotName).toString();
+  public static String getSnapshotPath(String snapshottableDir,
+      String snapshotRelativePath) {
+    final StringBuilder b = new StringBuilder(snapshottableDir);
+    if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
+      b.append(Path.SEPARATOR);
+    }
+    return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
+        .append(Path.SEPARATOR)
+        .append(snapshotRelativePath)
+        .toString();
   }
 
   /**
@@ -123,9 +130,7 @@
   @Override
   public String getFullPathName() {
-    return getParent().getFullPathName() + Path.SEPARATOR
-        + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR
-        + this.getLocalName();
+    return getSnapshotPath(getParent().getFullPathName(), getLocalName());
   }
 }
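Usage examples for the reworked getSnapshotPath, based on the code above; the charAt check keeps a snapshottable root directory from producing a doubled separator, and the second argument may now carry a path relative to the snapshot root:

    Snapshot.getSnapshotPath("/dir", "s1");        // "/dir/.snapshot/s1"
    Snapshot.getSnapshotPath("/", "s1");           // "/.snapshot/s1" (no "//" prefix)
    Snapshot.getSnapshotPath("/dir", "s1/sub/f");  // "/dir/.snapshot/s1/sub/f"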

ImageLoaderCurrent.java

@@ -123,7 +123,7 @@ class ImageLoaderCurrent implements ImageLoader {
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42};
+      -40, -41, -42, -43};
   private int imageVersion = 0;
 
   /* (non-Javadoc)

SnapshotDiff.java

@@ -43,16 +43,20 @@ public class SnapshotDiff {
     if (Path.CUR_DIR.equals(name)) { // current directory
       return "";
     }
-    if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)
-        || name.startsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR
-            + Path.SEPARATOR)) {
-      // get the snapshot name
-      int i = name.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
-      return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
-    }
-    return name;
-  }
+    final int i;
+    if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+      i = 0;
+    } else if (name.startsWith(
+        HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+      i = 1;
+    } else {
+      return name;
+    }
+    // get the snapshot name
+    return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
+  }
 
   public static void main(String[] argv) throws IOException {
     String description = "SnapshotDiff <snapshotDir> <from> <to>:\n" +
       "\tGet the difference between two snapshots, \n" +

TestFsLimits.java

@@ -33,8 +33,10 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.junit.Before;
 import org.junit.Test;
@@ -104,6 +106,7 @@ public class TestFsLimits {
     addChildWithName("333", null);
     addChildWithName("4444", null);
     addChildWithName("55555", null);
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
   }
 
   @Test
@@ -143,6 +146,7 @@
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
     fsIsReady = false;
 
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
     addChildWithName("1", null);
     addChildWithName("22", null);
     addChildWithName("333", null);
@@ -159,7 +163,10 @@
     Class<?> generated = null;
     try {
-      fs.verifyFsLimits(inodes, 1, child);
+      fs.verifyMaxComponentLength(child.getLocalNameBytes(), inodes, 1);
+      fs.verifyMaxDirItems(inodes, 1);
+      fs.verifyINodeName(child.getLocalNameBytes());
+
       rootInode.addChild(child);
     } catch (QuotaExceededException e) {
       generated = e.getClass();