diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c3911d08e03..8c9a7403221 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -783,6 +783,9 @@ Release 2.7.1 - UNRELEASED
HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after
finalization. (wang)
+ HDFS-8681. BlockScanner is incorrectly disabled by default.
+ (Arpit Agarwal)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2e6d2566a25..0fafadedc07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -374,7 +374,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
- public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+ public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24; // 3 weeks.
public static final String DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second";
public static final long DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
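
Note: the new default is 21 * 24 = 504 hours, matching the 504 written into hdfs-default.xml below. A quick standalone check of the hours-to-milliseconds conversion the scanner performs (a minimal sketch, not part of the patch):

    import java.util.concurrent.TimeUnit;

    public class ScanPeriodDefaultCheck {
        public static void main(String[] args) {
            final int defaultHours = 21 * 24; // 3 weeks = 504 hours
            // BlockScanner.Conf converts the configured hours to milliseconds.
            long periodMs = TimeUnit.MILLISECONDS.convert(defaultHours, TimeUnit.HOURS);
            System.out.println(periodMs); // prints 1814400000
        }
    }
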
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index b0248c50d9c..9c4dd10f005 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -115,6 +115,34 @@ public class BlockScanner {
}
}
+ /**
+ * Determine the configured block scanner interval.
+ *
+ * For compatibility with prior releases of HDFS, if the
+ * configured value is zero then the scan period is
+ * set to 3 weeks.
+ *
+ * If the configured value is less than zero then the scanner
+ * is disabled.
+ *
+ * @param conf Configuration object.
+ * @return block scan period in milliseconds.
+ */
+ private static long getConfiguredScanPeriodMs(Configuration conf) {
+ long tempScanPeriodMs = getUnitTestLong(
+ conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
+ TimeUnit.MILLISECONDS.convert(conf.getLong(
+ DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
+ DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
+
+ if (tempScanPeriodMs == 0) {
+ tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
+ DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
+ }
+
+ return tempScanPeriodMs;
+ }
+
@SuppressWarnings("unchecked")
Conf(Configuration conf) {
this.targetBytesPerSec = Math.max(0L, conf.getLong(
@@ -123,11 +151,7 @@ public class BlockScanner {
this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
- this.scanPeriodMs = Math.max(0L,
- getUnitTestLong(conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
- TimeUnit.MILLISECONDS.convert(conf.getLong(
- DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
- DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS)));
+ this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
@@ -159,7 +183,7 @@ public class BlockScanner {
* no threads will start.
*/
public boolean isEnabled() {
- return (conf.scanPeriodMs) > 0 && (conf.targetBytesPerSec > 0);
+ return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
}
/**
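
Taken together, getConfiguredScanPeriodMs and the fixed isEnabled give the intended semantics: a negative period disables the scanner, zero falls back to the three-week default, and a positive value is used as-is. A standalone sketch of the three cases (it mirrors the patched logic for illustration; it does not call the real BlockScanner.Conf):

    import java.util.concurrent.TimeUnit;

    public class ScanPeriodSemantics {
        static final int DEFAULT_HOURS = 21 * 24; // 504, as in DFSConfigKeys

        // Mirrors getConfiguredScanPeriodMs: zero means "use the default".
        static long scanPeriodMs(long configuredHours) {
            long ms = TimeUnit.MILLISECONDS.convert(configuredHours, TimeUnit.HOURS);
            return (ms == 0)
                ? TimeUnit.MILLISECONDS.convert(DEFAULT_HOURS, TimeUnit.HOURS)
                : ms;
        }

        // Mirrors the scan-period half of the isEnabled check.
        static boolean isEnabled(long configuredHours) {
            return scanPeriodMs(configuredHours) > 0;
        }

        public static void main(String[] args) {
            System.out.println(isEnabled(0));   // true: default 504h period applies
            System.out.println(isEnabled(-1));  // false: scanner disabled
            System.out.println(isEnabled(12));  // true: 12-hour period honored
        }
    }
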
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f234f28493a..0dff1b3eea8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1059,11 +1059,14 @@
<name>dfs.datanode.scan.period.hours</name>
- <value>0</value>
+ <value>504</value>
<description>
- If this is 0 or negative, the DataNode's block scanner will be
- disabled. If this is positive, the DataNode will not scan any
+ If this is positive, the DataNode will not scan any
individual block more than once in the specified scan period.
+ If this is negative, the block scanner is disabled.
+ If this is set to zero, then the default value of 504 hours
+ or 3 weeks is used. Prior versions of HDFS incorrectly documented
+ that setting this key to zero would disable the block scanner.
</description>
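
With this change, operators who relied on zero to keep the scanner off must now set a negative value. A minimal programmatic sketch using the Hadoop Configuration API (the key is the one defined in DFSConfigKeys above; equivalently, set dfs.datanode.scan.period.hours to -1 in hdfs-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DisableBlockScanner {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Zero no longer disables the scanner; a negative value does.
            conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        }
    }
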
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 59c7ade2ab2..d03fa2c03e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -274,6 +274,7 @@ public class TestFsDatasetImpl {
public void testChangeVolumeWithRunningCheckDirs() throws IOException {
RoundRobinVolumeChoosingPolicy blockChooser =
new RoundRobinVolumeChoosingPolicy<>();
+ conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
final BlockScanner blockScanner = new BlockScanner(datanode, conf);
final FsVolumeList volumeList = new FsVolumeList(
Collections.emptyList(), blockScanner, blockChooser);
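
The test change above is needed because, with the new 504-hour default, a BlockScanner built from an unmodified Configuration is now enabled (the bytes-per-second default of 1048576 is already positive), so testChangeVolumeWithRunningCheckDirs sets the period to -1 to keep scanner threads out of a test that only exercises volume management.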