HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get files list for scanning (Contributed by J.Andreina)

Vinayakumar B 2015-02-27 16:36:28 +05:30
parent 2954e65467
commit 4f75b15628
4 changed files with 19 additions and 7 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1040,6 +1040,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32 bit.
     (Kiran Kumar M R via cnauroth)
 
+    HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get
+    files list for scanning (J.Andreina via vinayakumarb)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -815,7 +815,7 @@ public class DataNode extends ReconfigurableBase
       reason = "verifcation is not supported by SimulatedFSDataset";
     }
     if (reason == null) {
-      directoryScanner = new DirectoryScanner(data, conf);
+      directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -63,6 +63,7 @@ public class DirectoryScanner implements Runnable {
   private final long scanPeriodMsecs;
   private volatile boolean shouldRun = false;
   private boolean retainDiffs = false;
+  private final DataNode datanode;
 
   final ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
   final Map<String, Stats> stats = new HashMap<String, Stats>();
@@ -308,7 +309,8 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
+  DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
+    this.datanode = datanode;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
@@ -547,7 +549,7 @@ public class DirectoryScanner implements Runnable {
     for (int i = 0; i < volumes.size(); i++) {
       if (isValid(dataset, volumes.get(i))) {
         ReportCompiler reportCompiler =
-          new ReportCompiler(volumes.get(i));
+          new ReportCompiler(datanode,volumes.get(i));
         Future<ScanInfoPerBlockPool> result =
           reportCompileThreadPool.submit(reportCompiler);
         compilersInProgress.put(i, result);
@@ -585,8 +587,10 @@ public class DirectoryScanner implements Runnable {
   private static class ReportCompiler
       implements Callable<ScanInfoPerBlockPool> {
     private final FsVolumeSpi volume;
+    private final DataNode datanode;
 
-    public ReportCompiler(FsVolumeSpi volume) {
+    public ReportCompiler(DataNode datanode, FsVolumeSpi volume) {
+      this.datanode = datanode;
       this.volume = volume;
     }
@@ -611,6 +615,8 @@ public class DirectoryScanner implements Runnable {
         files = FileUtil.listFiles(dir);
       } catch (IOException ioe) {
         LOG.warn("Exception occured while compiling report: ", ioe);
+        // Initiate a check on disk failure.
+        datanode.checkDiskErrorAsync();
         // Ignore this directory and proceed.
         return report;
       }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

@@ -299,10 +299,11 @@ public class TestDirectoryScanner {
         .build();
     try {
       cluster.waitActive();
+      DataNode dataNode = cluster.getDataNodes().get(0);
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
@@ -340,9 +341,10 @@ public class TestDirectoryScanner {
     try {
       cluster.waitActive();
       bpid = cluster.getNamesystem().getBlockPoolId();
+      DataNode dataNode = cluster.getDataNodes().get(0);
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
@@ -389,7 +391,8 @@ public class TestDirectoryScanner {
       client = cluster.getFileSystem().getClient();
       CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
           parallelism);
-      scanner = new DirectoryScanner(fds, CONF);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
 
       // Add files with 100 blocks
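The test updates above only thread the new constructor argument through; nothing shown in this patch asserts that a failed listing actually triggers the disk check. A hypothetical way to verify that by hand, reusing the ReportCompilerSketch above (not part of the patch):

import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical check: count how often a failing listing escalates to a
// disk check.
class DiskCheckTriggerDemo {
  public static void main(String[] args) {
    AtomicInteger checks = new AtomicInteger();
    ReportCompilerSketch compiler =
        new ReportCompilerSketch(checks::incrementAndGet);

    // A nonexistent path makes listFiles() return null, i.e. a failed scan.
    compiler.compile(new File("/nonexistent/dir"));

    System.out.println("disk checks triggered: " + checks.get()); // expect 1
  }
}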