HDFS-11274. Datanode should only check the failed volume upon IO errors. Contributed by Xiaoyu Yao.

Authored by Xiaoyu Yao on 2016-12-28 22:08:13 -08:00; committed by Arpit Agarwal
parent ec80de3ccc
commit eafaddca1a
23 changed files with 377 additions and 265 deletions
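
The core of the change: instead of scattering full-dataset datanode.checkDiskErrorAsync() calls through BlockReceiver and other write paths, volume checking now hangs off the FileIoProvider/FileIoEvents layer, which knows exactly which volume an IO error came from and can schedule a check of just that volume. A minimal sketch of the resulting chain, using simplified stand-ins rather than the real classes (the real signatures appear in the diffs below):

    // Simplified stand-ins; the real classes are DataNode, FileIoProvider,
    // FileIoEvents and DatasetVolumeChecker from the diffs below.
    class PerVolumeCheckSketch {
      interface Volume {}                                  // stands in for FsVolumeSpi

      interface VolumeChecker {                            // stands in for DatasetVolumeChecker
        void checkVolume(Volume suspect);                  // async check of one volume
      }

      static class MiniDataNode {                          // stands in for DataNode
        private final VolumeChecker volumeChecker;
        MiniDataNode(VolumeChecker volumeChecker) { this.volumeChecker = volumeChecker; }

        // New per-volume entry point added by this commit.
        void checkDiskErrorAsync(Volume failedVolume) {
          volumeChecker.checkVolume(failedVolume);         // only the suspect volume
        }
      }

      static class MiniFileIoProvider {                    // stands in for FileIoProvider
        private final MiniDataNode datanode;               // may be null in tests
        MiniFileIoProvider(MiniDataNode datanode) { this.datanode = datanode; }

        void flush(Volume volume, java.io.Flushable f) throws java.io.IOException {
          try {
            f.flush();
          } catch (java.io.IOException e) {
            // FileIoEvents.onFailure(datanode, volume, ...) boils down to this:
            if (datanode != null && volume != null) {
              datanode.checkDiskErrorAsync(volume);
            }
            throw e;                                       // the IO error still propagates
          }
        }
      }
    }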


@ -276,10 +276,9 @@ class BlockReceiver implements Closeable {
IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
DataNode.LOG.warn("IOException in BlockReceiver constructor"
+ (cause == null ? "" : ". Cause is "), cause);
if (cause != null) { // possible disk error
if (cause != null) {
ioe = cause;
datanode.checkDiskErrorAsync();
// Volume error check moved to FileIoProvider
}
throw ioe;
@ -361,9 +360,8 @@ public void close() throws IOException {
if (measuredFlushTime) {
datanode.metrics.addFlushNanos(flushTotalNanos);
}
// disk check
if(ioe != null) {
datanode.checkDiskErrorAsync();
// Volume error check moved to FileIoProvider
throw ioe;
}
}
@ -786,7 +784,7 @@ private int receivePacket() throws IOException {
manageWriterOsCache(offsetInBlock);
}
} catch (IOException iex) {
datanode.checkDiskErrorAsync();
// Volume error check moved to FileIoProvider
throw iex;
}
}
@ -1395,7 +1393,7 @@ public void run() {
} catch (IOException e) {
LOG.warn("IOException in BlockReceiver.run(): ", e);
if (running) {
datanode.checkDiskErrorAsync();
// Volume error check moved to FileIoProvider
LOG.info(myString, e);
running = false;
if (!Thread.interrupted()) { // failure not caused by interruption


@ -37,7 +37,7 @@
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class CountingFileIoEvents implements FileIoEvents {
public class CountingFileIoEvents extends FileIoEvents {
private final Map<OPERATION, Counts> counts;
private static class Counts {
@ -90,7 +90,6 @@ public void afterFileIo(
public void onFailure(
@Nullable FsVolumeSpi volume, OPERATION op, Exception e, long begin) {
counts.get(op).failures.incrementAndGet();
}
@Override


@ -371,6 +371,7 @@ public static InetSocketAddress createSocketAddr(String target) {
SaslDataTransferServer saslServer;
private final boolean getHdfsBlockLocationsEnabled;
private ObjectName dataNodeInfoBeanName;
// Test verification only
private volatile long lastDiskErrorCheck;
private String supergroup;
private boolean isPermissionEnabled;
@ -408,7 +409,7 @@ private static Tracer createTracer(Configuration conf) {
this.tracer = createTracer(conf);
this.tracerConfigurationManager =
new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
this.fileIoProvider = new FileIoProvider(conf);
this.fileIoProvider = new FileIoProvider(conf, this);
this.fileDescriptorPassingDisabledReason = null;
this.maxNumberOfBlocksToLog = 0;
this.confVersion = null;
@ -433,7 +434,7 @@ private static Tracer createTracer(Configuration conf) {
this.tracer = createTracer(conf);
this.tracerConfigurationManager =
new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
this.fileIoProvider = new FileIoProvider(conf);
this.fileIoProvider = new FileIoProvider(conf, this);
this.blockScanner = new BlockScanner(this);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
@ -783,7 +784,7 @@ public IOException call() {
/**
* Remove volumes from DataNode.
* See {@link #removeVolumes(Set, boolean)} for details.
* See {@link #removeVolumes(Collection, boolean)} for details.
*
* @param locations the StorageLocations of the volumes to be removed.
* @throws IOException
@ -810,7 +811,7 @@ private void removeVolumes(final Collection<StorageLocation> locations)
* <ul>Reset configuration DATA_DIR and {@link #dataDirs} to represent
* active volumes.</ul>
* </li>
* @param absoluteVolumePaths the absolute path of volumes.
* @param storageLocations the absolute path of volumes.
* @param clearFailure if true, clears the failure information related to the
* volumes.
* @throws IOException
@ -1258,7 +1259,7 @@ boolean areCacheReportsDisabledForTests() {
* If conf's CONFIG_PROPERTY_SIMULATED property is set
* then a simulated storage based data node is created.
*
* @param dataDirs - only for a non-simulated storage data node
* @param dataDirectories - only for a non-simulated storage data node
* @throws IOException
*/
void startDataNode(List<StorageLocation> dataDirectories,
@ -2020,10 +2021,11 @@ public void shutdown() {
tracer.close();
}
/**
* Check if there is a disk failure asynchronously and if so, handle the error
* Check if there is a disk failure asynchronously
* and if so, handle the error.
*/
@VisibleForTesting
public void checkDiskErrorAsync() {
volumeChecker.checkAllVolumesAsync(
data, new DatasetVolumeChecker.Callback() {
@ -2037,14 +2039,37 @@ public void call(Set<FsVolumeSpi> healthyVolumes,
LOG.debug("checkDiskErrorAsync: no volume failures detected");
}
lastDiskErrorCheck = Time.monotonicNow();
DataNode.this.handleVolumeFailures(failedVolumes);
handleVolumeFailures(failedVolumes);
}
});
}
private void handleDiskError(String errMsgr) {
/**
* Check if there is a disk failure asynchronously
* and if so, handle the error.
*/
public void checkDiskErrorAsync(FsVolumeSpi volume) {
volumeChecker.checkVolume(
volume, new DatasetVolumeChecker.Callback() {
@Override
public void call(Set<FsVolumeSpi> healthyVolumes,
Set<FsVolumeSpi> failedVolumes) {
if (failedVolumes.size() > 0) {
LOG.warn("checkDiskErrorAsync callback got {} failed volumes: {}",
failedVolumes.size(), failedVolumes);
} else {
LOG.debug("checkDiskErrorAsync: no volume failures detected");
}
lastDiskErrorCheck = Time.monotonicNow();
handleVolumeFailures(failedVolumes);
}
});
}
private void handleDiskError(String failedVolumes) {
final boolean hasEnoughResources = data.hasEnoughResource();
LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
"] Keep Running: " + hasEnoughResources);
// If we have enough active valid volumes then we do not want to
// shutdown the DN completely.
@ -2054,7 +2079,7 @@ private void handleDiskError(String errMsgr) {
//inform NameNodes
for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
bpos.trySendErrorReport(dpError, errMsgr);
bpos.trySendErrorReport(dpError, failedVolumes);
}
if(hasEnoughResources) {
@ -2062,7 +2087,8 @@ private void handleDiskError(String errMsgr) {
return; // do not shutdown
}
LOG.warn("DataNode is shutting down: " + errMsgr);
LOG.warn("DataNode is shutting down due to failed volumes: ["
+ failedVolumes + "]");
shouldRun = false;
}
@ -2412,8 +2438,11 @@ public void run() {
}
LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
targets[0] + " got ", ie);
// check if there are any disk problem
checkDiskErrorAsync();
// disk check moved to FileIoProvider
IOException cause = DatanodeUtil.getCauseIfDiskError(ie);
if (cause != null) { // possible disk error
LOG.warn("IOException in DataTransfer#run(). Cause is ", cause);
}
} finally {
xmitsInProgress.getAndDecrement();
IOUtils.closeStream(blockSender);
@ -3167,10 +3196,15 @@ public void checkDiskError() throws IOException {
}
private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
if (unhealthyVolumes.isEmpty()) {
LOG.debug("handleVolumeFailures done with empty " +
"unhealthyVolumes");
return;
}
data.handleVolumeFailures(unhealthyVolumes);
final Set<File> unhealthyDirs = new HashSet<>(unhealthyVolumes.size());
if (!unhealthyVolumes.isEmpty()) {
StringBuilder sb = new StringBuilder("DataNode failed volumes:");
for (FsVolumeSpi vol : unhealthyVolumes) {
unhealthyDirs.add(new File(vol.getBasePath()).getAbsoluteFile());
@ -3184,11 +3218,14 @@ private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
LOG.warn("Error occurred when removing unhealthy storage dirs: "
+ e.getMessage(), e);
}
LOG.info(sb.toString());
if (LOG.isDebugEnabled()) {
LOG.debug(sb.toString());
}
// send blockreport regarding volume failure
handleDiskError(sb.toString());
}
}
@VisibleForTesting
public long getLastDiskErrorCheck() {
return lastDiskErrorCheck;
}
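
Direct callers of the targeted overload are now limited to code that already holds the failing volume, such as DirectoryScanner and the default FileIoEvents.onFailure further down; everything else relies on FileIoProvider. A hedged sketch of the calling convention (the helper class and its names are illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

    final class TargetedCheckSketch {
      // Illustrative helper: re-throw an IO error after scheduling a check of the
      // one volume it implicates, mirroring what the patched callers do.
      static void failAndCheck(DataNode datanode, FsVolumeSpi volume, IOException ioe)
          throws IOException {
        datanode.checkDiskErrorAsync(volume);   // targeted check, not a full dataset scan
        throw ioe;
      }
    }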


@ -31,7 +31,7 @@
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class DefaultFileIoEvents implements FileIoEvents {
public final class DefaultFileIoEvents extends FileIoEvents {
@Override
public long beforeMetadataOp(
@Nullable FsVolumeSpi volume, OPERATION op) {


@ -855,7 +855,7 @@ private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol,
} catch (IOException ioe) {
LOG.warn("Exception occured while compiling report: ", ioe);
// Initiate a check on disk failure.
datanode.checkDiskErrorAsync();
datanode.checkDiskErrorAsync(volume);
// Ignore this directory and proceed.
return report;
}


@ -32,7 +32,7 @@
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface FileIoEvents {
public abstract class FileIoEvents {
/**
* Invoked before a filesystem metadata operation.
@ -42,7 +42,7 @@ public interface FileIoEvents {
* @return timestamp at which the operation was started. 0 if
* unavailable.
*/
long beforeMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op);
abstract long beforeMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op);
/**
* Invoked after a filesystem metadata operation has completed.
@ -52,7 +52,8 @@ public interface FileIoEvents {
* @param begin timestamp at which the operation was started. 0
* if unavailable.
*/
void afterMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op, long begin);
abstract void afterMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op,
long begin);
/**
* Invoked before a read/write/flush/channel transfer operation.
@ -63,7 +64,8 @@ public interface FileIoEvents {
* @return timestamp at which the operation was started. 0 if
* unavailable.
*/
long beforeFileIo(@Nullable FsVolumeSpi volume, OPERATION op, long len);
abstract long beforeFileIo(@Nullable FsVolumeSpi volume, OPERATION op,
long len);
/**
@ -76,7 +78,7 @@ public interface FileIoEvents {
* @return timestamp at which the operation was started. 0 if
* unavailable.
*/
void afterFileIo(@Nullable FsVolumeSpi volume, OPERATION op,
abstract void afterFileIo(@Nullable FsVolumeSpi volume, OPERATION op,
long begin, long len);
/**
@ -86,12 +88,28 @@ void afterFileIo(@Nullable FsVolumeSpi volume, OPERATION op,
* @param e Exception encountered during the operation.
* @param begin time at which the operation was started.
*/
void onFailure(
abstract void onFailure(
@Nullable FsVolumeSpi volume, OPERATION op, Exception e, long begin);
/**
* Invoked by FileIoProvider if an operation fails with an exception.
* @param datanode datanode that runs volume check upon volume io failure
* @param volume target volume for the operation. Null if unavailable.
* @param op type of operation.
* @param e Exception encountered during the operation.
* @param begin time at which the operation was started.
*/
void onFailure(DataNode datanode,
@Nullable FsVolumeSpi volume, OPERATION op, Exception e, long begin) {
onFailure(volume, op, e, begin);
if (datanode != null && volume != null) {
datanode.checkDiskErrorAsync(volume);
}
}
/**
* Return statistics as a JSON string.
* @return
*/
@Nullable String getStatistics();
@Nullable abstract String getStatistics();
}
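
Turning FileIoEvents into an abstract class is what lets the datanode-aware onFailure above ship a shared default: every hook implementation (CountingFileIoEvents, DefaultFileIoEvents, ProfilingFileIoEvents) inherits the checkDiskErrorAsync(volume) behavior for free. A hedged sketch of a minimal subclass under the new contract; it assumes the same package as FileIoEvents because the abstract hooks are package-private:

    package org.apache.hadoop.hdfs.server.datanode;

    import javax.annotation.Nullable;
    import org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

    // Minimal sketch of a custom hook class; only the abstract methods are implemented.
    // The onFailure(DataNode, ...) default in FileIoEvents still runs the volume check.
    class NoOpFileIoEvents extends FileIoEvents {
      @Override
      long beforeMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op) {
        return 0;
      }

      @Override
      void afterMetadataOp(@Nullable FsVolumeSpi volume, OPERATION op, long begin) {
      }

      @Override
      long beforeFileIo(@Nullable FsVolumeSpi volume, OPERATION op, long len) {
        return 0;
      }

      @Override
      void afterFileIo(@Nullable FsVolumeSpi volume, OPERATION op, long begin, long len) {
      }

      @Override
      void onFailure(@Nullable FsVolumeSpi volume, OPERATION op, Exception e, long begin) {
        // Record or ignore the failure here; the targeted disk check is handled
        // by the inherited onFailure(DataNode, ...) overload.
      }

      @Override
      @Nullable
      String getStatistics() {
        return null;
      }
    }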


@ -79,12 +79,16 @@ public class FileIoProvider {
FileIoProvider.class);
private final FileIoEvents eventHooks;
private final DataNode datanode;
/**
* @param conf Configuration object. May be null. When null,
* the event handlers are no-ops.
* @param datanode datanode that owns this FileIoProvider. Used for
* IO error based volume checker callback
*/
public FileIoProvider(@Nullable Configuration conf) {
public FileIoProvider(@Nullable Configuration conf,
final DataNode datanode) {
if (conf != null) {
final Class<? extends FileIoEvents> clazz = conf.getClass(
DFSConfigKeys.DFS_DATANODE_FILE_IO_EVENTS_CLASS_KEY,
@ -94,6 +98,7 @@ public FileIoProvider(@Nullable Configuration conf) {
} else {
eventHooks = new DefaultFileIoEvents();
}
this.datanode = datanode;
}
/**
@ -139,7 +144,7 @@ public void flush(
f.flush();
eventHooks.afterFileIo(volume, FLUSH, begin, 0);
} catch (Exception e) {
eventHooks.onFailure(volume, FLUSH, e, begin);
eventHooks.onFailure(datanode, volume, FLUSH, e, begin);
throw e;
}
}
@ -157,7 +162,7 @@ public void sync(
fos.getChannel().force(true);
eventHooks.afterFileIo(volume, SYNC, begin, 0);
} catch (Exception e) {
eventHooks.onFailure(volume, SYNC, e, begin);
eventHooks.onFailure(datanode, volume, SYNC, e, begin);
throw e;
}
}
@ -176,7 +181,7 @@ public void syncFileRange(
NativeIO.POSIX.syncFileRangeIfPossible(outFd, offset, numBytes, flags);
eventHooks.afterFileIo(volume, SYNC, begin, 0);
} catch (Exception e) {
eventHooks.onFailure(volume, SYNC, e, begin);
eventHooks.onFailure(datanode, volume, SYNC, e, begin);
throw e;
}
}
@ -196,7 +201,7 @@ public void posixFadvise(
identifier, outFd, offset, length, flags);
eventHooks.afterMetadataOp(volume, FADVISE, begin);
} catch (Exception e) {
eventHooks.onFailure(volume, FADVISE, e, begin);
eventHooks.onFailure(datanode, volume, FADVISE, e, begin);
throw e;
}
}
@ -214,7 +219,7 @@ public boolean delete(@Nullable FsVolumeSpi volume, File f) {
eventHooks.afterMetadataOp(volume, DELETE, begin);
return deleted;
} catch (Exception e) {
eventHooks.onFailure(volume, DELETE, e, begin);
eventHooks.onFailure(datanode, volume, DELETE, e, begin);
throw e;
}
}
@ -236,7 +241,7 @@ public boolean deleteWithExistsCheck(@Nullable FsVolumeSpi volume, File f) {
}
return deleted;
} catch (Exception e) {
eventHooks.onFailure(volume, DELETE, e, begin);
eventHooks.onFailure(datanode, volume, DELETE, e, begin);
throw e;
}
}
@ -264,7 +269,7 @@ public void transferToSocketFully(
waitTime, transferTime);
eventHooks.afterFileIo(volume, TRANSFER, begin, count);
} catch (Exception e) {
eventHooks.onFailure(volume, TRANSFER, e, begin);
eventHooks.onFailure(datanode, volume, TRANSFER, e, begin);
throw e;
}
}
@ -285,7 +290,7 @@ public boolean createFile(
eventHooks.afterMetadataOp(volume, OPEN, begin);
return created;
} catch (Exception e) {
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -312,7 +317,7 @@ public FileInputStream getFileInputStream(
return fis;
} catch(Exception e) {
org.apache.commons.io.IOUtils.closeQuietly(fis);
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -328,7 +333,7 @@ public FileInputStream getFileInputStream(
* @param f File object.
* @param append if true, then bytes will be written to the end of the
* file rather than the beginning.
* @param FileOutputStream to the given file object.
* @return FileOutputStream to the given file object.
* @throws FileNotFoundException
*/
public FileOutputStream getFileOutputStream(
@ -342,7 +347,7 @@ public FileOutputStream getFileOutputStream(
return fos;
} catch(Exception e) {
org.apache.commons.io.IOUtils.closeQuietly(fos);
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -372,7 +377,7 @@ public FileOutputStream getFileOutputStream(
* before delegating to the wrapped stream.
*
* @param volume target volume. null if unavailable.
* @param f File object.
* @param fd File descriptor object.
* @return FileOutputStream to the given file object.
* @throws FileNotFoundException
*/
@ -407,7 +412,7 @@ public FileInputStream getShareDeleteFileInputStream(
return fis;
} catch(Exception e) {
org.apache.commons.io.IOUtils.closeQuietly(fis);
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -438,7 +443,7 @@ public FileInputStream openAndSeek(
return fis;
} catch(Exception e) {
org.apache.commons.io.IOUtils.closeQuietly(fis);
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -468,7 +473,7 @@ public RandomAccessFile getRandomAccessFile(
return raf;
} catch(Exception e) {
org.apache.commons.io.IOUtils.closeQuietly(raf);
eventHooks.onFailure(volume, OPEN, e, begin);
eventHooks.onFailure(datanode, volume, OPEN, e, begin);
throw e;
}
}
@ -487,7 +492,7 @@ public boolean fullyDelete(@Nullable FsVolumeSpi volume, File dir) {
eventHooks.afterMetadataOp(volume, DELETE, begin);
return deleted;
} catch(Exception e) {
eventHooks.onFailure(volume, DELETE, e, begin);
eventHooks.onFailure(datanode, volume, DELETE, e, begin);
throw e;
}
}
@ -508,7 +513,7 @@ public void replaceFile(
FileUtil.replaceFile(src, target);
eventHooks.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MOVE, e, begin);
eventHooks.onFailure(datanode, volume, MOVE, e, begin);
throw e;
}
}
@ -530,7 +535,7 @@ public void rename(
Storage.rename(src, target);
eventHooks.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MOVE, e, begin);
eventHooks.onFailure(datanode, volume, MOVE, e, begin);
throw e;
}
}
@ -552,7 +557,7 @@ public void moveFile(
FileUtils.moveFile(src, target);
eventHooks.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MOVE, e, begin);
eventHooks.onFailure(datanode, volume, MOVE, e, begin);
throw e;
}
}
@ -576,7 +581,7 @@ public void move(
Files.move(src, target, options);
eventHooks.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MOVE, e, begin);
eventHooks.onFailure(datanode, volume, MOVE, e, begin);
throw e;
}
}
@ -625,7 +630,7 @@ public void nativeCopyFileUnbuffered(
Storage.nativeCopyFileUnbuffered(src, target, preserveFileDate);
eventHooks.afterFileIo(volume, NATIVE_COPY, begin, length);
} catch(Exception e) {
eventHooks.onFailure(volume, NATIVE_COPY, e, begin);
eventHooks.onFailure(datanode, volume, NATIVE_COPY, e, begin);
throw e;
}
}
@ -650,7 +655,7 @@ public boolean mkdirs(
isDirectory = !created && dir.isDirectory();
eventHooks.afterMetadataOp(volume, MKDIRS, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MKDIRS, e, begin);
eventHooks.onFailure(datanode, volume, MKDIRS, e, begin);
throw e;
}
@ -676,7 +681,7 @@ public void mkdirsWithExistsCheck(
succeeded = dir.isDirectory() || dir.mkdirs();
eventHooks.afterMetadataOp(volume, MKDIRS, begin);
} catch(Exception e) {
eventHooks.onFailure(volume, MKDIRS, e, begin);
eventHooks.onFailure(datanode, volume, MKDIRS, e, begin);
throw e;
}
@ -702,7 +707,7 @@ public File[] listFiles(
eventHooks.afterMetadataOp(volume, LIST, begin);
return children;
} catch(Exception e) {
eventHooks.onFailure(volume, LIST, e, begin);
eventHooks.onFailure(datanode, volume, LIST, e, begin);
throw e;
}
}
@ -712,7 +717,7 @@ public File[] listFiles(
* {@link FileUtil#listFiles(File)}.
*
* @param volume target volume. null if unavailable.
* @param Driectory to be listed.
* @param dir directory to be listed.
* @return array of strings representing the directory entries.
* @throws IOException
*/
@ -724,7 +729,7 @@ public String[] list(
eventHooks.afterMetadataOp(volume, LIST, begin);
return children;
} catch(Exception e) {
eventHooks.onFailure(volume, LIST, e, begin);
eventHooks.onFailure(datanode, volume, LIST, e, begin);
throw e;
}
}
@ -747,7 +752,7 @@ public List<String> listDirectory(
eventHooks.afterMetadataOp(volume, LIST, begin);
return children;
} catch(Exception e) {
eventHooks.onFailure(volume, LIST, e, begin);
eventHooks.onFailure(datanode, volume, LIST, e, begin);
throw e;
}
}
@ -769,7 +774,7 @@ public int getHardLinkCount(
eventHooks.afterMetadataOp(volume, LIST, begin);
return count;
} catch(Exception e) {
eventHooks.onFailure(volume, LIST, e, begin);
eventHooks.onFailure(datanode, volume, LIST, e, begin);
throw e;
}
}
@ -788,7 +793,7 @@ public boolean exists(@Nullable FsVolumeSpi volume, File f) {
eventHooks.afterMetadataOp(volume, EXISTS, begin);
return exists;
} catch(Exception e) {
eventHooks.onFailure(volume, EXISTS, e, begin);
eventHooks.onFailure(datanode, volume, EXISTS, e, begin);
throw e;
}
}
@ -829,7 +834,7 @@ public int read() throws IOException {
eventHooks.afterFileIo(volume, READ, begin, 1);
return b;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -845,7 +850,7 @@ public int read(@Nonnull byte[] b) throws IOException {
eventHooks.afterFileIo(volume, READ, begin, numBytesRead);
return numBytesRead;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -861,7 +866,7 @@ public int read(@Nonnull byte[] b, int off, int len) throws IOException {
eventHooks.afterFileIo(volume, READ, begin, numBytesRead);
return numBytesRead;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -903,7 +908,7 @@ public void write(int b) throws IOException {
super.write(b);
eventHooks.afterFileIo(volume, WRITE, begin, 1);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
@ -918,7 +923,7 @@ public void write(@Nonnull byte[] b) throws IOException {
super.write(b);
eventHooks.afterFileIo(volume, WRITE, begin, b.length);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
@ -933,7 +938,7 @@ public void write(@Nonnull byte[] b, int off, int len) throws IOException {
super.write(b, off, len);
eventHooks.afterFileIo(volume, WRITE, begin, len);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
@ -961,7 +966,7 @@ public int read() throws IOException {
eventHooks.afterFileIo(volume, READ, begin, 1);
return b;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -974,7 +979,7 @@ public int read(byte[] b, int off, int len) throws IOException {
eventHooks.afterFileIo(volume, READ, begin, numBytesRead);
return numBytesRead;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -987,7 +992,7 @@ public int read(byte[] b) throws IOException {
eventHooks.afterFileIo(volume, READ, begin, numBytesRead);
return numBytesRead;
} catch(Exception e) {
eventHooks.onFailure(volume, READ, e, begin);
eventHooks.onFailure(datanode, volume, READ, e, begin);
throw e;
}
}
@ -999,7 +1004,7 @@ public void write(int b) throws IOException {
super.write(b);
eventHooks.afterFileIo(volume, WRITE, begin, 1);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
@ -1011,7 +1016,7 @@ public void write(@Nonnull byte[] b) throws IOException {
super.write(b);
eventHooks.afterFileIo(volume, WRITE, begin, b.length);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
@ -1023,7 +1028,7 @@ public void write(byte[] b, int off, int len) throws IOException {
super.write(b, off, len);
eventHooks.afterFileIo(volume, WRITE, begin, len);
} catch(Exception e) {
eventHooks.onFailure(volume, WRITE, e, begin);
eventHooks.onFailure(datanode, volume, WRITE, e, begin);
throw e;
}
}
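
The only extra state the provider needs for all of this is the owning DataNode handed to its constructor; passing null (as ReplicaInfo's static default instance and some tests below do) simply disables the IO-error-driven check while keeping the hooks. A hedged construction sketch, where conf and datanode are assumed to be supplied by the caller:

    // `conf` is the datanode Configuration; `datanode` may be null, in which case
    // FileIoEvents#onFailure skips the checkDiskErrorAsync(volume) callback.
    FileIoProvider withVolumeChecks = new FileIoProvider(conf, datanode);
    FileIoProvider hooksOnly = new FileIoProvider(conf, null);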


@ -30,7 +30,7 @@
* related operations on datanode volumes.
*/
@InterfaceAudience.Private
class ProfilingFileIoEvents implements FileIoEvents {
class ProfilingFileIoEvents extends FileIoEvents {
@Override
public long beforeMetadataOp(@Nullable FsVolumeSpi volume,


@ -64,7 +64,7 @@ abstract public class ReplicaInfo extends Block
/** This is used by some tests and FsDatasetUtil#computeChecksum. */
private static final FileIoProvider DEFAULT_FILE_IO_PROVIDER =
new FileIoProvider(null);
new FileIoProvider(null, null);
/**
* Constructor


@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -43,10 +44,10 @@ public interface AsyncChecker<K, V> {
* @param context the interpretation of the context depends on the
* target.
*
* @return returns a {@link ListenableFuture} that can be used to
* @return returns an {@link Optional} of {@link ListenableFuture} that can be used to
* retrieve the result of the asynchronous check.
*/
ListenableFuture<V> schedule(Checkable<K, V> target, K context);
Optional<ListenableFuture<V>> schedule(Checkable<K, V> target, K context);
/**
* Cancel all executing checks and wait for them to complete.
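
Because schedule() can now decline to run a check, callers get an Optional and must treat the absent case as "no future to wait on", releasing whatever references or counters they were holding; the volume checkers further down follow exactly this pattern. A hedged caller-side sketch, assuming `checker` is an AsyncChecker<VolumeCheckContext, VolumeCheckResult> (for example a ThrottledAsyncChecker), `volume` is an FsVolumeSpi and `ctx` its check context:

    Optional<ListenableFuture<VolumeCheckResult>> olf = checker.schedule(volume, ctx);
    if (olf.isPresent()) {
      Futures.addCallback(olf.get(), new FutureCallback<VolumeCheckResult>() {
        @Override
        public void onSuccess(VolumeCheckResult result) {
          // the check ran; record the result
        }
        @Override
        public void onFailure(Throwable t) {
          // the check ran and threw; treat the volume as failed
        }
      });
    } else {
      // Not scheduled (throttled, or a check is already in flight): release any
      // volume reference now, since no callback will fire for this call.
    }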


@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.FutureCallback;
@ -191,18 +192,26 @@ public Set<FsVolumeSpi> checkAllVolumes(
for (int i = 0; i < references.size(); ++i) {
final FsVolumeReference reference = references.getReference(i);
allVolumes.add(reference.getVolume());
ListenableFuture<VolumeCheckResult> future =
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(reference.getVolume(), IGNORED_CONTEXT);
LOG.info("Scheduled health check for volume {}", reference.getVolume());
Futures.addCallback(future, new ResultHandler(
reference, healthyVolumes, failedVolumes, numVolumes, new Callback() {
if (olf.isPresent()) {
allVolumes.add(reference.getVolume());
Futures.addCallback(olf.get(),
new ResultHandler(reference, healthyVolumes, failedVolumes,
numVolumes, new Callback() {
@Override
public void call(Set<FsVolumeSpi> ignored1,
Set<FsVolumeSpi> ignored2) {
latch.countDown();
}
}));
} else {
IOUtils.cleanup(null, reference);
if (numVolumes.decrementAndGet() == 0) {
latch.countDown();
}
}
}
// Wait until our timeout elapses, after which we give up on
@ -263,18 +272,26 @@ public boolean checkAllVolumesAsync(
final Set<FsVolumeSpi> healthyVolumes = new HashSet<>();
final Set<FsVolumeSpi> failedVolumes = new HashSet<>();
final AtomicLong numVolumes = new AtomicLong(references.size());
boolean added = false;
LOG.info("Checking {} volumes", references.size());
for (int i = 0; i < references.size(); ++i) {
final FsVolumeReference reference = references.getReference(i);
// The context parameter is currently ignored.
ListenableFuture<VolumeCheckResult> future =
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(reference.getVolume(), IGNORED_CONTEXT);
Futures.addCallback(future, new ResultHandler(
reference, healthyVolumes, failedVolumes, numVolumes, callback));
if (olf.isPresent()) {
added = true;
Futures.addCallback(olf.get(),
new ResultHandler(reference, healthyVolumes, failedVolumes,
numVolumes, callback));
} else {
IOUtils.cleanup(null, reference);
numVolumes.decrementAndGet();
}
}
numAsyncDatasetChecks.incrementAndGet();
return true;
return added;
}
/**
@ -291,7 +308,7 @@ void call(Set<FsVolumeSpi> healthyVolumes,
}
/**
* Check a single volume, returning a {@link ListenableFuture}
* Check a single volume asynchronously, returning a {@link ListenableFuture}
* that can be used to retrieve the final result.
*
* If the volume cannot be referenced then it is already closed and
@ -305,21 +322,32 @@ void call(Set<FsVolumeSpi> healthyVolumes,
public boolean checkVolume(
final FsVolumeSpi volume,
Callback callback) {
if (volume == null) {
LOG.debug("Cannot schedule check on null volume");
return false;
}
FsVolumeReference volumeReference;
try {
volumeReference = volume.obtainReference();
} catch (ClosedChannelException e) {
// The volume has already been closed.
callback.call(new HashSet<FsVolumeSpi>(), new HashSet<FsVolumeSpi>());
return false;
}
ListenableFuture<VolumeCheckResult> future =
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(volume, IGNORED_CONTEXT);
if (olf.isPresent()) {
numVolumeChecks.incrementAndGet();
Futures.addCallback(future, new ResultHandler(
volumeReference, new HashSet<FsVolumeSpi>(), new HashSet<FsVolumeSpi>(),
Futures.addCallback(olf.get(),
new ResultHandler(volumeReference, new HashSet<FsVolumeSpi>(),
new HashSet<FsVolumeSpi>(),
new AtomicLong(1), callback));
return true;
} else {
IOUtils.cleanup(null, volumeReference);
}
return false;
}
/**
@ -343,8 +371,8 @@ private class ResultHandler
* successful, add the volume here.
* @param failedVolumes set of failed volumes. If the disk check fails,
* add the volume here.
* @param semaphore semaphore used to trigger callback invocation.
* @param callback invoked when the semaphore can be successfully acquired.
* @param volumeCounter volumeCounter used to trigger callback invocation.
* @param callback invoked when the volumeCounter reaches 0.
*/
ResultHandler(FsVolumeReference reference,
Set<FsVolumeSpi> healthyVolumes,
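
One caller-visible consequence in this file: checkAllVolumesAsync now reports whether any check was actually scheduled, and when it returns false the callback may never be invoked. Callers should guard on the return value instead of blocking, as in this hedged sketch (the updated test further down does the same):

    boolean scheduled = volumeChecker.checkAllVolumesAsync(data, callback);
    if (!scheduled) {
      // Every volume was throttled or could not be referenced; no callback will
      // fire for this round, so do not block waiting for it.
    }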


@ -20,6 +20,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@ -158,8 +159,11 @@ public List<StorageLocation> check(
// Start parallel disk check operations on all StorageLocations.
for (StorageLocation location : dataDirs) {
goodLocations.put(location, true);
futures.put(location,
delegateChecker.schedule(location, context));
Optional<ListenableFuture<VolumeCheckResult>> olf =
delegateChecker.schedule(location, context);
if (olf.isPresent()) {
futures.put(location, olf.get());
}
}
if (maxVolumeFailuresTolerated >= dataDirs.size()) {


@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
@ -101,13 +102,11 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
* will receive the same Future.
*/
@Override
public synchronized ListenableFuture<V> schedule(
final Checkable<K, V> target,
final K context) {
LOG.debug("Scheduling a check of {}", target);
public Optional<ListenableFuture<V>> schedule(
final Checkable<K, V> target, final K context) {
LOG.info("Scheduling a check for {}", target);
if (checksInProgress.containsKey(target)) {
return checksInProgress.get(target);
return Optional.absent();
}
if (completedChecks.containsKey(target)) {
@ -117,9 +116,7 @@ public synchronized ListenableFuture<V> schedule(
LOG.debug("Skipped checking {}. Time since last check {}ms " +
"is less than the min gap {}ms.",
target, msSinceLastCheck, minMsBetweenChecks);
return result.result != null ?
Futures.immediateFuture(result.result) :
Futures.<V>immediateFailedFuture(result.exception);
return Optional.absent();
}
}
@ -132,7 +129,7 @@ public V call() throws Exception {
});
checksInProgress.put(target, lf);
addResultCachingCallback(target, lf);
return lf;
return Optional.of(lf);
}
/**
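
The contract change matters most to callers that used to block on the returned future: a repeat request inside the throttle window now yields Optional.absent() rather than an immediate future carrying the cached result or exception, which is why the tests below switch from future.get() assertions to polling the target. A hedged sketch, assuming `checker` is a ThrottledAsyncChecker<Boolean, Boolean> and `target` a Checkable as in those tests:

    // Before this commit a throttled repeat returned an immediate future with the
    // cached result or exception; now it returns no future at all.
    Optional<ListenableFuture<Boolean>> olf = checker.schedule(target, true);
    if (!olf.isPresent()) {
      // Throttled or already in flight: poll the target's own state (as the new
      // waitTestCheckableCheckCount helper below does) instead of calling get().
    }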


@ -277,7 +277,10 @@ void saveDfsUsed() {
fileIoProvider.getFileOutputStream(volume, outFile), "UTF-8")) {
// mtime is written last, so that truncated writes won't be valid.
out.write(Long.toString(used) + " " + Long.toString(timer.now()));
fileIoProvider.flush(volume, out);
// This is only called as part of the volume shutdown.
// We explicitly avoid calling flush with fileIoProvider which triggers
// volume check upon io exception to avoid cyclic volume checks.
out.flush();
}
} catch (IOException ioe) {
// If write failed, the volume might be bad. Since the cache file is


@ -1974,17 +1974,22 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
*/
File validateBlockFile(String bpid, long blockId) {
//Should we check for metadata file too?
final File f;
File f = null;
ReplicaInfo info;
try(AutoCloseableLock lock = datasetLock.acquire()) {
f = getFile(bpid, blockId, false);
info = volumeMap.get(bpid, blockId);
if (info != null) {
f = info.getBlockFile();
}
}
if(f != null ) {
if(f.exists())
if(f.exists()) {
return f;
}
// if file is not null, but doesn't exist - possibly disk failed
datanode.checkDiskErrorAsync();
datanode.checkDiskErrorAsync(info.getVolume());
}
if (LOG.isDebugEnabled()) {


@ -137,7 +137,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.configuredCapacity = -1;
// dataset.datanode may be null in some tests.
this.fileIoProvider = dataset.datanode != null ?
dataset.datanode.getFileIoProvider() : new FileIoProvider(conf);
dataset.datanode.getFileIoProvider() :
new FileIoProvider(conf, dataset.datanode);
cacheExecutor = initializeCacheExecutor(parent);
this.metrics = DataNodeVolumeMetrics.create(conf, parent.getAbsolutePath());
}


@ -589,7 +589,7 @@ public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration
}
registerMBean(datanodeUuid);
this.fileIoProvider = new FileIoProvider(conf);
this.fileIoProvider = new FileIoProvider(conf, datanode);
this.storage = new SimulatedStorage(
conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));


@ -942,7 +942,7 @@ public void testDirectlyReloadAfterCheckDiskError()
DataNodeTestUtils.injectDataDirFailure(dirToFail);
// Call and wait DataNode to detect disk failure.
long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
dn.checkDiskErrorAsync();
dn.checkDiskErrorAsync(failedVolume);
while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
Thread.sleep(100);
}


@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertArrayEquals;
@ -318,6 +318,12 @@ public void testMultipleVolFailuresOnNode() throws Exception {
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)3);
// Create additional file to trigger failure based volume check on dn1Vol2
// and dn2Vol2.
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)3);
ArrayList<DataNode> dns = cluster.getDataNodes();
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
@ -536,8 +542,6 @@ private void checkAggregateFailuresAtNameNode(boolean expectCapacityKnown,
private void checkFailuresAtDataNode(DataNode dn,
long expectedVolumeFailuresCounter, boolean expectCapacityKnown,
String... expectedFailedVolumes) throws Exception {
assertCounter("VolumeFailures", expectedVolumeFailuresCounter,
getMetrics(dn.getMetrics().name()));
FsDatasetSpi<?> fsd = dn.getFSDataset();
assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());


@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -103,12 +104,14 @@ public void testCheckOneVolume() throws Exception {
/**
* Request a check and ensure it triggered {@link FsVolumeSpi#check}.
*/
boolean result =
checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {
@Override
public void call(Set<FsVolumeSpi> healthyVolumes,
Set<FsVolumeSpi> failedVolumes) {
numCallbackInvocations.incrementAndGet();
if (expectedVolumeHealth != null && expectedVolumeHealth != FAILED) {
if (expectedVolumeHealth != null &&
expectedVolumeHealth != FAILED) {
assertThat(healthyVolumes.size(), is(1));
assertThat(failedVolumes.size(), is(0));
} else {
@ -120,8 +123,10 @@ public void call(Set<FsVolumeSpi> healthyVolumes,
// Ensure that the check was invoked at least once.
verify(volume, times(1)).check(any(VolumeCheckContext.class));
if (result) {
assertThat(numCallbackInvocations.get(), is(1L));
}
}
/**
* Test {@link DatasetVolumeChecker#checkAllVolumes} propagates
@ -172,7 +177,7 @@ public void testCheckAllVolumesAsync() throws Exception {
checker.setDelegateChecker(new DummyChecker());
final AtomicLong numCallbackInvocations = new AtomicLong(0);
checker.checkAllVolumesAsync(
boolean result = checker.checkAllVolumesAsync(
dataset, new DatasetVolumeChecker.Callback() {
@Override
public void call(
@ -192,7 +197,9 @@ public void call(
});
// The callback should be invoked exactly once.
if (result) {
assertThat(numCallbackInvocations.get(), is(1L));
}
// Ensure each volume's check() method was called exactly once.
for (FsVolumeSpi volume : volumes) {
@ -206,15 +213,18 @@ public void call(
*/
static class DummyChecker
implements AsyncChecker<VolumeCheckContext, VolumeCheckResult> {
@Override
public ListenableFuture<VolumeCheckResult> schedule(
public Optional<ListenableFuture<VolumeCheckResult>> schedule(
Checkable<VolumeCheckContext, VolumeCheckResult> target,
VolumeCheckContext context) {
try {
return Futures.immediateFuture(target.check(context));
return Optional.of(
Futures.immediateFuture(target.check(context)));
} catch (Exception e) {
LOG.info("check routine threw exception " + e);
return Futures.immediateFailedFuture(e);
return Optional.of(
Futures.<VolumeCheckResult>immediateFailedFuture(e));
}
}


@ -131,29 +131,6 @@ public void testMinGapIsEnforcedForSyncChecks() throws Exception {
assertThat(checker.getNumSkippedChecks(), is(1L));
}
@Test(timeout=60000)
public void testMinGapIsEnforcedForASyncChecks() throws Exception {
final List<FsVolumeSpi> volumes =
TestDatasetVolumeChecker.makeVolumes(1, VolumeCheckResult.HEALTHY);
final FsDatasetSpi<FsVolumeSpi> dataset =
TestDatasetVolumeChecker.makeDataset(volumes);
final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer);
checker.checkAllVolumesAsync(dataset, null);
assertThat(checker.getNumAsyncDatasetChecks(), is(1L));
// Re-check without advancing the timer. Ensure the check is skipped.
checker.checkAllVolumesAsync(dataset, null);
assertThat(checker.getNumAsyncDatasetChecks(), is(1L));
assertThat(checker.getNumSkippedChecks(), is(1L));
// Re-check after advancing the timer. Ensure the check is performed.
timer.advance(MIN_DISK_CHECK_GAP_MS);
checker.checkAllVolumesAsync(dataset, null);
assertThat(checker.getNumAsyncDatasetChecks(), is(2L));
assertThat(checker.getNumSkippedChecks(), is(1L));
}
/**
* Create a mock FsVolumeSpi whose {@link FsVolumeSpi#check} routine
* hangs forever.


@ -18,15 +18,14 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.base.Optional;
import com.google.common.base.Supplier;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -38,10 +37,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.core.Is.isA;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ -53,9 +49,6 @@ public class TestThrottledAsyncChecker {
LoggerFactory.getLogger(TestThrottledAsyncChecker.class);
private static final long MIN_ERROR_CHECK_GAP = 1000;
@Rule
public ExpectedException thrown = ExpectedException.none();
/**
* Test various scheduling combinations to ensure scheduling and
* throttling behave as expected.
@ -70,34 +63,34 @@ public void testScheduler() throws Exception {
getExecutorService());
// check target1 and ensure we get back the expected result.
assertTrue(checker.schedule(target1, true).get());
assertThat(target1.numChecks.get(), is(1L));
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
// Check target1 again without advancing the timer. target1 should not
// be checked again and the cached result should be returned.
assertTrue(checker.schedule(target1, true).get());
assertThat(target1.numChecks.get(), is(1L));
// be checked again.
assertFalse(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
// Schedule target2 scheduled without advancing the timer.
// target2 should be checked as it has never been checked before.
assertTrue(checker.schedule(target2, true).get());
assertThat(target2.numChecks.get(), is(1L));
assertTrue(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 1L);
// Advance the timer but just short of the min gap.
// Neither target1 nor target2 should be checked again.
timer.advance(MIN_ERROR_CHECK_GAP - 1);
assertTrue(checker.schedule(target1, true).get());
assertThat(target1.numChecks.get(), is(1L));
assertTrue(checker.schedule(target2, true).get());
assertThat(target2.numChecks.get(), is(1L));
assertFalse(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
assertFalse(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 1L);
// Advance the timer again.
// Both targets should be checked now.
timer.advance(MIN_ERROR_CHECK_GAP);
assertTrue(checker.schedule(target1, true).get());
assertThat(target1.numChecks.get(), is(2L));
assertTrue(checker.schedule(target2, true).get());
assertThat(target1.numChecks.get(), is(2L));
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 2L);
assertTrue(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 2L);
}
@Test (timeout=60000)
@ -109,13 +102,16 @@ public void testCancellation() throws Exception {
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP,
getExecutorService());
ListenableFuture<Boolean> lf = checker.schedule(target, true);
Futures.addCallback(lf, callback);
Optional<ListenableFuture<Boolean>> olf =
checker.schedule(target, true);
if (olf.isPresent()) {
Futures.addCallback(olf.get(), callback);
}
// Request immediate cancellation.
checker.shutdownAndWait(0, TimeUnit.MILLISECONDS);
try {
assertFalse(lf.get());
assertFalse(olf.get().get());
fail("Failed to get expected InterruptedException");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof InterruptedException);
@ -130,27 +126,33 @@ public void testConcurrentChecks() throws Exception {
final ThrottledAsyncChecker<Boolean, Boolean> checker =
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP,
getExecutorService());
final ListenableFuture<Boolean> lf1 = checker.schedule(target, true);
final ListenableFuture<Boolean> lf2 = checker.schedule(target, true);
final Optional<ListenableFuture<Boolean>> olf1 =
checker.schedule(target, true);
// Ensure that concurrent requests return the same future object.
assertTrue(lf1 == lf2);
final Optional<ListenableFuture<Boolean>> olf2 =
checker.schedule(target, true);
// Ensure that concurrent requests return the future object
// for the first caller.
assertTrue(olf1.isPresent());
assertFalse(olf2.isPresent());
// Unblock the latch and wait for it to finish execution.
target.latch.countDown();
lf1.get();
olf1.get().get();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
// We should not get back the same future as before.
// We should get an absent Optional.
// This can take a short while until the internal callback in
// ThrottledAsyncChecker is scheduled for execution.
// Also this should not trigger a new check operation as the timer
// was not advanced. If it does trigger a new check then the test
// will fail with a timeout.
final ListenableFuture<Boolean> lf3 = checker.schedule(target, true);
return lf3 != lf2;
final Optional<ListenableFuture<Boolean>> olf3 =
checker.schedule(target, true);
return !olf3.isPresent();
}
}, 100, 10000);
}
@ -168,15 +170,30 @@ public void testContextIsPassed() throws Exception {
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP,
getExecutorService());
assertTrue(checker.schedule(target1, true).get());
assertThat(target1.numChecks.get(), is(1L));
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
timer.advance(MIN_ERROR_CHECK_GAP + 1);
assertFalse(checker.schedule(target1, false).get());
assertThat(target1.numChecks.get(), is(2L));
assertTrue(checker.schedule(target1, false).isPresent());
waitTestCheckableCheckCount(target1, 2L);
}
private void waitTestCheckableCheckCount(
final TestCheckableBase target,
final long expectedChecks) throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
// This can take a short while until the internal callback in
// ThrottledAsyncChecker is scheduled for execution.
// If it does trigger a new check then the test
// will fail with a timeout.
return target.getTotalChecks() == expectedChecks;
}
}, 100, 10000);
}
/**
* Ensure that the exeption from a failed check is cached
* Ensure that the exception from a failed check is cached
* and returned without re-running the check when the minimum
* gap has not elapsed.
*
@ -190,13 +207,11 @@ public void testExceptionCaching() throws Exception {
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP,
getExecutorService());
thrown.expectCause(isA(DummyException.class));
checker.schedule(target1, true).get();
assertThat(target1.numChecks.get(), is(1L));
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
thrown.expectCause(isA(DummyException.class));
checker.schedule(target1, true).get();
assertThat(target1.numChecks.get(), is(2L));
assertFalse(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
}
/**
@ -206,28 +221,38 @@ private ExecutorService getExecutorService() {
return new ScheduledThreadPoolExecutor(1);
}
private abstract static class TestCheckableBase
implements Checkable<Boolean, Boolean> {
protected final AtomicLong numChecks = new AtomicLong(0);
public long getTotalChecks() {
return numChecks.get();
}
public void incrTotalChecks() {
numChecks.incrementAndGet();
}
}
/**
* A Checkable that just returns its input.
*/
private static class NoOpCheckable
implements Checkable<Boolean, Boolean> {
private final AtomicLong numChecks = new AtomicLong(0);
extends TestCheckableBase {
@Override
public Boolean check(Boolean context) {
numChecks.incrementAndGet();
incrTotalChecks();
return context;
}
}
private static class ThrowingCheckable
implements Checkable<Boolean, Boolean> {
private final AtomicLong numChecks = new AtomicLong(0);
extends TestCheckableBase {
@Override
public Boolean check(Boolean context) throws DummyException {
numChecks.incrementAndGet();
incrTotalChecks();
throw new DummyException();
}
}
private static class DummyException extends Exception {


@ -159,7 +159,7 @@ public void setUp() throws IOException {
this.conf = new Configuration();
this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
final FileIoProvider fileIoProvider = new FileIoProvider(conf);
final FileIoProvider fileIoProvider = new FileIoProvider(conf, null);
when(datanode.getFileIoProvider()).thenReturn(fileIoProvider);
when(datanode.getConf()).thenReturn(conf);
final DNConf dnConf = new DNConf(datanode);