diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e1c4a275860..7932e685895 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -360,6 +360,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY =
       "dfs.namenode.startup.delay.block.deletion.sec";
   public static final long DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT = 0L;
+  /** Block deletion increment. */
+  public static final String DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY =
+      "dfs.namenode.block.deletion.increment";
+  public static final int DFS_NAMENODE_BLOCK_DELETION_INCREMENT_DEFAULT = 1000;
+
   public static final String DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES =
       HdfsClientConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES;
   public static final boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a7e95336d2d..c6522943b33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -419,18 +419,18 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       FSNamesystem.class.getName() + ".audit");
 
   private final int maxCorruptFileBlocksReturn;
-  static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
   private final UserGroupInformation fsOwner;
   private final String supergroup;
   private final boolean standbyShouldCheckpoint;
+  private final int blockDeletionIncrement;
 
   /** Interval between each check of lease to release. */
   private final long leaseRecheckIntervalMs;
   /** Maximum time the lock is hold to release lease. */
   private final long maxLockHoldToReleaseLeaseMs;
 
-  // Batch size for open files respons
+  // Batch size for open files response
   private final int maxListOpenFilesResponses;
 
   // Scan interval is not configurable.
@@ -895,6 +895,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           DFSConfigKeys.DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES +
           " must be a positive integer."
       );
+
+      this.blockDeletionIncrement = conf.getInt(
+          DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY,
+          DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_DEFAULT);
+      Preconditions.checkArgument(blockDeletionIncrement > 0,
+          DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY +
+          " must be a positive integer.");
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -3005,7 +3012,7 @@
     while (iter.hasNext()) {
       writeLock();
       try {
-        for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) {
+        for (int i = 0; i < blockDeletionIncrement && iter.hasNext(); i++) {
           blockManager.removeBlock(iter.next());
         }
       } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 5dd2fb4c8f7..95203a23af3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4814,10 +4814,20 @@
 <property>
   <name>dfs.reformat.disabled</name>
   <value>false</value>
   <description>
     Disable reformat of NameNode. If it's value is set to "true"
     and metadata directories already exist then attempt to format NameNode
     will throw NameNodeFormatException.
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.block.deletion.increment</name>
+  <value>1000</value>
+  <description>
+    The number of blocks removed in one batch of incremental block
+    deletion. This setting controls the deletion rate so that other
+    waiters on the namesystem lock can get in between batches.
+  </description>
+</property>
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index 0882d18386c..3027c79739c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -49,6 +49,7 @@ public class TestLargeDirectoryDelete {
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY, 1);
   }
 
   /** create a file with a length of filelen */
@@ -137,7 +138,6 @@ public class TestLargeDirectoryDelete {
     threads[1].start();
 
     final long start = Time.now();
-    FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
     mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
     final long end = Time.now();
     threads[0].endThread();
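
For reviewers, the following is a minimal, self-contained sketch (not part of the patch; the class and method names BatchedBlockRemovalSketch, removeInBatches, and remove are invented for illustration) of the batching pattern that dfs.namenode.block.deletion.increment tunes: at most one increment of blocks is removed per write-lock hold, and the lock is released between batches so that other waiters can get in. In HDFS itself the loop lives in FSNamesystem and calls blockManager.removeBlock(...).

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class BatchedBlockRemovalSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  /** Remove all blocks, at most blockDeletionIncrement per lock hold. */
  void removeInBatches(List<Long> collectedBlocks, int blockDeletionIncrement) {
    Iterator<Long> iter = collectedBlocks.iterator();
    while (iter.hasNext()) {
      lock.writeLock().lock();
      try {
        // One increment of deletions per acquisition of the write lock.
        for (int i = 0; i < blockDeletionIncrement && iter.hasNext(); i++) {
          remove(iter.next());
        }
      } finally {
        // Releasing between batches lets readers/writers queued on the
        // lock make progress during a large recursive delete.
        lock.writeLock().unlock();
      }
    }
  }

  private void remove(long blockId) {
    // Stand-in for blockManager.removeBlock(block) in FSNamesystem.
  }
}

A smaller increment shortens each write-lock hold at the cost of more lock round trips. The test change above exercises the same knob: TestLargeDirectoryDelete now sets the value to 1 via CONF.setInt(DFSConfigKeys.DFS_NAMENODE_BLOCK_DELETION_INCREMENT_KEY, 1) instead of mutating the removed static field FSNamesystem.BLOCK_DELETION_INCREMENT.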