HDFS-8276. LazyPersistFileScrubber should be disabled if scrubber interval configured zero. (Contributed by Surendra Singh Lilhore)

Arpit Agarwal 2015-05-01 11:11:48 -07:00
parent 3393461197
commit 64d30a6186
4 changed files with 50 additions and 6 deletions
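
Before this change, a scrub interval of zero failed NameNode startup with "must be non-zero"; after it, zero disables the LazyPersistFileScrubber and only negative values are rejected. A minimal sketch of disabling the scrubber from client or test code, assuming the stock HdfsConfiguration API (the key constant lives in DFSConfigKeys, consistent with the import added in the test changes below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DisableScrubberSketch {
  public static Configuration confWithScrubberDisabled() {
    Configuration conf = new HdfsConfiguration();
    // After this patch, 0 means "never run the LazyPersistFileScrubber";
    // negative values are rejected by FSNamesystem at startup.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 0);
    return conf;
  }
}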

CHANGES.txt

@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8300. Fix unit test failures and findbugs warning caused by HDFS-8283.
     (jing9)
 
+    HDFS-8276. LazyPersistFileScrubber should be disabled if scrubber interval
+    configured zero. (Surendra Singh Lilhore via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

FSNamesystem.java

@@ -825,9 +825,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
           DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT);
 
-      if (this.lazyPersistFileScrubIntervalSec == 0) {
+      if (this.lazyPersistFileScrubIntervalSec < 0) {
         throw new IllegalArgumentException(
-            DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC + " must be non-zero.");
+            DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC
+                + " must be zero (for disable) or greater than zero.");
       }
 
       // For testing purposes, allow the DT secret manager to be started regardless
@@ -1173,6 +1174,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber(
             lazyPersistFileScrubIntervalSec));
         lazyPersistFileScrubber.start();
+      } else {
+        LOG.warn("Lazy persist file scrubber is disabled,"
+            + " configured scrub interval is zero.");
       }
 
       cacheManager.startMonitorThread();
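
Net effect of the two FSNamesystem hunks, condensed into one illustrative sketch (not the literal code, which is split between the constructor and the service-start path):

if (lazyPersistFileScrubIntervalSec < 0) {
  // rejected at startup: "must be zero (for disable) or greater than zero."
} else if (lazyPersistFileScrubIntervalSec == 0) {
  // scrubber disabled; FSNamesystem logs a warning
} else {
  // LazyPersistFileScrubber daemon started with the configured period
}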

LazyPersistTestCase.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -239,12 +240,17 @@ public abstract class LazyPersistTestCase {
       long ramDiskStorageLimit,
       long evictionLowWatermarkReplicas,
       boolean useSCR,
-      boolean useLegacyBlockReaderLocal) throws IOException {
+      boolean useLegacyBlockReaderLocal,
+      boolean disableScrubber) throws IOException {
 
     Configuration conf = new Configuration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
-        LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
+    if (disableScrubber) {
+      conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 0);
+    } else {
+      conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
+          LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
+    }
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
         HEARTBEAT_RECHECK_INTERVAL_MSEC);
@@ -357,11 +363,16 @@
       return this;
     }
 
+    public ClusterWithRamDiskBuilder disableScrubber() {
+      this.disableScrubber = true;
+      return this;
+    }
+
     public void build() throws IOException {
       LazyPersistTestCase.this.startUpCluster(
           numDatanodes, hasTransientStorage, storageTypes, ramDiskReplicaCapacity,
           ramDiskStorageLimit, evictionLowWatermarkReplicas,
-          useScr, useLegacyBlockReaderLocal);
+          useScr, useLegacyBlockReaderLocal, disableScrubber);
     }
 
     private int numDatanodes = REPL_FACTOR;
@@ -372,6 +383,7 @@
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;
     private long evictionLowWatermarkReplicas = EVICTION_LOW_WATERMARK;
+    private boolean disableScrubber = false;
   }
 
   protected final void triggerBlockReport()

TestLazyPersistFiles.java

@@ -120,6 +120,31 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
         is(0L));
   }
 
+  @Test
+  public void testDisableLazyPersistFileScrubber()
+      throws IOException, InterruptedException {
+    getClusterBuilder().setRamDiskReplicaCapacity(2).disableScrubber().build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
+
+    makeTestFile(path1, BLOCK_SIZE, true);
+    ensureFileReplicasOnStorageType(path1, RAM_DISK);
+
+    // Stop the DataNode and sleep for the time it takes the NN to
+    // detect the DN as being dead.
+    cluster.shutdownDataNodes();
+    Thread.sleep(30000L);
+
+    // Next, wait for the replication monitor to mark the file as corrupt.
+    Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
+
+    // Wait for the LazyPersistFileScrubber to run.
+    Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
+
+    // Ensure that path1 still exists: the disabled scrubber must not delete it.
+    Assert.assertTrue(fs.exists(path1));
+  }
+
   /**
    * Concurrent read from the same node and verify the contents.
    */