HDFS-5517. Lower the default maximum number of blocks per file. Contributed by Aaron T. Myers and Andrew Wang.

commit 7226a71b1f
parent 69fb70c31a
Author: Andrew Wang
Date:   2016-11-30 15:58:31 -08:00

4 changed files with 12 additions and 5 deletions

DFSConfigKeys.java

@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_MIN_BLOCK_SIZE_KEY = "dfs.namenode.fs-limits.min-block-size";
   public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
   public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
-  public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
+  public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 10*1000;
   public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
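
Each default constant pairs with the key declared just above it. A minimal sketch (class name is illustrative, not part of this commit) of how the lowered default resolves when the key is left unset in configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class MaxBlocksPerFileDefault {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With this commit, an unset key falls back to 10*1000 = 10,000
    // blocks rather than the old 1024*1024 = 1,048,576.
    long maxBlocksPerFile = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);
    System.out.println("max blocks per file = " + maxBlocksPerFile);
  }
}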

hdfs-default.xml

@@ -372,7 +372,7 @@
 <property>
   <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-  <value>1048576</value>
+  <value>10000</value>
   <description>Maximum number of blocks per file, enforced by the Namenode on
     write. This prevents the creation of extremely large files which can
     degrade performance.</description>
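
Operators who genuinely need larger files can still raise the limit explicitly. A hedged sketch (class name illustrative; setting the same property in hdfs-site.xml before NameNode startup has the same effect):

import org.apache.hadoop.conf.Configuration;

public class RestoreOldBlockLimit {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Restore the pre-commit ceiling of 1,048,576 blocks per file.
    conf.setLong("dfs.namenode.fs-limits.max-blocks-per-file", 1024L * 1024L);
  }
}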

TestDirectoryScanner.java

@@ -590,8 +590,15 @@ public class TestDirectoryScanner {
         100);
     DataNode dataNode = cluster.getDataNodes().get(0);
-    createFile(GenericTestUtils.getMethodName(),
-        BLOCK_LENGTH * blocks, false);
+    final int maxBlocksPerFile = (int) DFSConfigKeys
+        .DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT;
+    int numBlocksToCreate = blocks;
+    while (numBlocksToCreate > 0) {
+      final int toCreate = Math.min(maxBlocksPerFile, numBlocksToCreate);
+      createFile(GenericTestUtils.getMethodName() + numBlocksToCreate,
+          BLOCK_LENGTH * toCreate, false);
+      numBlocksToCreate -= toCreate;
+    }
     float ratio = 0.0f;
     int retries = maxRetries;
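
The while-loop replaces a single large createFile call: since one file may no longer hold more blocks than the new default allows, the test spreads the same total across several files. A standalone sketch of the arithmetic (method name is illustrative):

// How many files the test needs for a given total, via ceiling division.
static int filesNeeded(int totalBlocks, int maxBlocksPerFile) {
  return (totalBlocks + maxBlocksPerFile - 1) / maxBlocksPerFile;
}
// e.g. filesNeeded(25000, 10000) == 3: two files of 10,000 blocks and
// one of 5,000, mirroring the while-loop in the hunk above.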

TestNameNodeMetrics.java

@@ -571,7 +571,7 @@ public class TestNameNodeMetrics {
     Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
     //Perform create file operation
-    createFile(file1_Path, 1024 * 1024,(short)2);
+    createFile(file1_Path, 1024, (short) 2);
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
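
The smaller write here is presumably a consequence of the new default: these metrics tests configure a very small block size (on the order of 100 bytes), so the old 1,048,576-byte file would span roughly 10,486 blocks and exceed the new 10,000-block limit, while a 1,024-byte file needs only a handful.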