HDFS-6102. Lower the default maximum items per directory to fix PB fsimage loading. Contributed by Andrew Wang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1577426 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 2014-03-14 04:52:03 +00:00
parent d9cdcb9474
commit d37c31a2db
7 changed files with 41 additions and 22 deletions

CHANGES.txt

@@ -617,6 +617,9 @@ Release 2.4.0 - UNRELEASED
HDFS-6097. zero-copy reads are incorrectly disabled on file offsets above
2GB (cmccabe)
HDFS-6102. Lower the default maximum items per directory to fix PB fsimage
loading. (wang)
BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

DFSConfigKeys.java

@@ -283,9 +283,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
//Filesystem limit keys
public static final String DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY = "dfs.namenode.fs-limits.max-component-length";
public static final int DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT = 0; // no limit
public static final int DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT = 255;
public static final String DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY = "dfs.namenode.fs-limits.max-directory-items";
public static final int DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT = 0; // no limit
public static final int DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MIN_BLOCK_SIZE_KEY = "dfs.namenode.fs-limits.min-block-size";
public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";

FSDirectory.java

@@ -188,6 +188,14 @@ public class FSDirectory implements Closeable {
this.maxDirItems = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
final int MAX_DIR_ITEMS = 64 * 100 * 1000;
Preconditions.checkArgument(
maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
+ " to a value less than 0 or greater than " + MAX_DIR_ITEMS);
int threshold = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
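
The 6.4 million cap follows directly from protobuf's default 64 MB message size limit mentioned in the comment above. A minimal sketch of the arithmetic, assuming roughly 10 encoded bytes per child id in a DirEntry (the per-entry size is an assumption, not taken from the HDFS source):

    // Back-of-the-envelope check for the MAX_DIR_ITEMS cap (illustrative only).
    public class MaxDirItemsMath {
      public static void main(String[] args) {
        final long pbMaxMessageBytes = 64L * 1024 * 1024;     // protobuf default size limit
        final long assumedBytesPerChild = 10;                 // assumption: worst-case varint64
        System.out.println("ceiling ~= " + (pbMaxMessageBytes / assumedBytesPerChild)); // ~6.7 million
        System.out.println("cap = " + (64 * 100 * 1000));     // 6,400,000, the value used above
      }
    }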
@@ -2180,9 +2188,6 @@ public class FSDirectory implements Closeable {
*/
void verifyMaxDirItems(INode[] pathComponents, int pos)
throws MaxDirectoryItemsExceededException {
if (maxDirItems == 0) {
return;
}
final INodeDirectory parent = pathComponents[pos-1].asDirectory();
final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size();

fsimage.proto

@@ -173,6 +173,10 @@ message FilesUnderConstructionSection {
* NAME: INODE_DIR
*/
message INodeDirectorySection {
/**
* A single DirEntry needs to fit in the default PB max message size of
* 64MB. Please be careful when adding more fields to a DirEntry!
*/
message DirEntry {
optional uint64 parent = 1;
// children that are not reference nodes
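
The 64 MB figure in the new comment is protobuf-java's default per-message size limit, enforced by CodedInputStream. A hedged sketch of what that limit looks like at the parsing layer (an illustration only, not the HDFS image loader; this commit instead caps the data so the default limit is never hit):

    // Illustrative only: stream parsing fails once a message exceeds the
    // CodedInputStream size limit, which defaults to 64 MB.
    import com.google.protobuf.CodedInputStream;
    import java.io.InputStream;

    public final class PbSizeLimitSketch {
      static CodedInputStream openWithLargerLimit(InputStream in) {
        CodedInputStream cis = CodedInputStream.newInstance(in);
        cis.setSizeLimit(128 * 1024 * 1024); // hypothetical override; HDFS does not do this
        return cis;
      }
    }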

hdfs.proto

@@ -352,6 +352,12 @@ message CheckpointCommandProto {
/**
* Block information
*
* Please be wary of adding additional fields here, since INodeFiles
* need to fit in PB's default max message size of 64MB.
* We restrict the max # of blocks per file
* (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
* to avoid changing this.
*/
message BlockProto {
required uint64 blockId = 1;
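
A rough upper bound on the encoded size of one BlockProto, multiplied by the per-file block limit, is what has to stay under 64 MB. The arithmetic below is a hedged estimate (the three-uint64-field layout and the worst-case varint sizes are assumptions, not taken verbatim from this file):

    // Illustrative worst-case arithmetic for blocks per INodeFile.
    public class BlocksPerFileMath {
      public static void main(String[] args) {
        final int assumedBytesPerUint64Field = 11;   // 1 tag byte + up to 10 varint bytes
        final int assumedFieldsPerBlock = 3;         // e.g. blockId, genStamp, numBytes
        final long pbMaxMessageBytes = 64L * 1024 * 1024;
        System.out.println("blocks that fit ~= "
            + pbMaxMessageBytes / (assumedBytesPerUint64Field * assumedFieldsPerBlock));
      }
    }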

hdfs-default.xml

@@ -288,7 +288,7 @@
<property>
<name>dfs.namenode.fs-limits.max-directory-items</name>
<value>0</value>
<value>1048576</value>
<description>Defines the maximum number of items that a directory may
contain. A value of 0 will disable the check.</description>
</property>
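
Together with the Preconditions check added in FSDirectory above, the configured value now has to be greater than 0 and at most 6,400,000; a value of 0 no longer disables the check but is rejected at NameNode startup. A small hedged sketch of setting the limit programmatically (the same key can also be set in hdfs-site.xml):

    // Sketch: overriding the per-directory item limit via Configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DirItemsConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Any value outside (0, 6400000] is rejected by FSDirectory at startup.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2 * 1024 * 1024);
        System.out.println(conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 0));
      }
    }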

TestFsLimits.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
@@ -82,22 +83,6 @@ public class TestFsLimits {
fsIsReady = true;
}
@Test
public void testDefaultMaxComponentLength() {
int maxComponentLength = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
assertEquals(0, maxComponentLength);
}
@Test
public void testDefaultMaxDirItems() {
int maxDirItems = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
assertEquals(0, maxDirItems);
}
@Test
public void testNoLimits() throws Exception {
addChildWithName("1", null);
@@ -129,6 +114,22 @@ public class TestFsLimits {
addChildWithName("4444", MaxDirectoryItemsExceededException.class);
}
@Test
public void testMaxDirItemsLimits() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 0);
try {
addChildWithName("1", null);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set dfs", e);
}
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 64*100*1024);
try {
addChildWithName("1", null);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set dfs", e);
}
}
@Test
public void testMaxComponentsAndMaxDirItems() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 3);