HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.

Kihwal Lee 2015-03-27 09:05:17 -05:00
parent af618f23a7
commit 60882ab26d
5 changed files with 23 additions and 15 deletions


@@ -342,6 +342,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
     a lot of time if disks are busy (Rushabh S Shah via kihwal)
+    HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
   OPTIMIZATIONS
   BUG FIXES


@@ -82,12 +82,11 @@ class BPServiceActor implements Runnable {
   final BPOfferService bpos;
-  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // lastBlockReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also
   // assigned/read by the actor thread. Thus they should be declared as volatile
   // to make sure the "happens-before" consistency.
   volatile long lastBlockReport = 0;
-  volatile long lastDeletedReport = 0;
   boolean resetBlockReportTime = true;
@@ -417,10 +416,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
     synchronized (pendingIncrementalBRperStorage) {
-      lastDeletedReport = 0;
+      sendImmediateIBR = true;
       pendingIncrementalBRperStorage.notifyAll();
-      while (lastDeletedReport == 0) {
+      while (sendImmediateIBR) {
         try {
           pendingIncrementalBRperStorage.wait(100);
         } catch (InterruptedException e) {
@@ -465,7 +464,6 @@ class BPServiceActor implements Runnable {
     // or we will report an RBW replica after the BlockReport already reports
     // a FINALIZED one.
     reportReceivedDeletedBlocks();
-    lastDeletedReport = startTime;
     long brCreateStartTime = monotonicNow();
     Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
@@ -674,7 +672,6 @@ class BPServiceActor implements Runnable {
    */
   private void offerService() throws Exception {
     LOG.info("For namenode " + nnAddr + " using"
-        + " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
         + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
         + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
         + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -690,7 +687,9 @@ class BPServiceActor implements Runnable {
         //
         // Every so often, send heartbeat or block-report
         //
-        if (startTime - lastHeartbeat >= dnConf.heartBeatInterval) {
+        boolean sendHeartbeat =
+            startTime - lastHeartbeat >= dnConf.heartBeatInterval;
+        if (sendHeartbeat) {
           //
           // All heartbeat messages include following info:
           // -- Datanode name
@@ -729,10 +728,8 @@ class BPServiceActor implements Runnable {
             }
           }
         }
-        if (sendImmediateIBR ||
-            (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
+        if (sendImmediateIBR || sendHeartbeat) {
          reportReceivedDeletedBlocks();
-          lastDeletedReport = startTime;
         }
         List<DatanodeCommand> cmds = blockReport();
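Net effect of the BPServiceActor changes above: incremental block reports (IBRs) for received and deleted blocks are no longer gated on the separate lastDeletedReport/deleteReportInterval timer; they go out whenever a heartbeat does, or sooner when sendImmediateIBR is set. The following is a minimal, self-contained sketch of that decision loop; the names mirror the diff, but the NameNode RPCs, block reports, and error handling are stubbed out, so it is an illustration rather than the actual method:

// Sketch of the post-patch reporting cadence in BPServiceActor.offerService().
// Real RPCs to the NameNode are replaced with stubs; only the timing logic is shown.
class OfferServiceSketch {
  private final long heartBeatInterval = 3_000L;      // dfs.heartbeat.interval, default 3 seconds
  private volatile boolean sendImmediateIBR = false;  // set when blocks are received or deleted
  private long lastHeartbeat = 0;

  void offerService() throws InterruptedException {
    while (true) {
      long startTime = monotonicNow();

      // A heartbeat is due once per heartbeat interval.
      boolean sendHeartbeat = startTime - lastHeartbeat >= heartBeatInterval;
      if (sendHeartbeat) {
        lastHeartbeat = startTime;
        sendHeartbeatRpc();
      }

      // IBRs now ride on the heartbeat cadence, or go out sooner when
      // sendImmediateIBR is set, instead of waiting for the removed
      // deleteReportInterval of 100 * heartBeatInterval.
      if (sendImmediateIBR || sendHeartbeat) {
        reportReceivedDeletedBlocks();
        sendImmediateIBR = false;
      }

      Thread.sleep(100);  // stand-in for waiting on pendingIncrementalBRperStorage
    }
  }

  private long monotonicNow() { return System.nanoTime() / 1_000_000L; }
  private void sendHeartbeatRpc() { /* heartbeat RPC elided */ }
  private void reportReceivedDeletedBlocks() { /* IBR RPC elided */ }
}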


@@ -82,7 +82,6 @@ public class DNConf {
   final long heartBeatInterval;
   final long blockReportInterval;
   final long blockReportSplitThreshold;
-  final long deleteReportInterval;
   final long initialBlockReportDelay;
   final long cacheReportInterval;
   final long dfsclientSlowIoWarningThresholdMs;
@@ -164,7 +163,6 @@ public class DNConf {
     heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
-    this.deleteReportInterval = 100 * heartBeatInterval;
     // do we need to sync block file contents to disk when blockfile is closed?
     this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
         DFS_DATANODE_SYNCONCLOSE_DEFAULT);
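For scale, assuming the stock dfs.heartbeat.interval of 3 seconds, the deleteReportInterval removed above amounted to 100 heartbeats, so a deleted-block IBR could legitimately be held back for roughly five minutes; that delay is what this change eliminates. The arithmetic as a trivially runnable check (the class name is just for illustration):

public class DeleteReportIntervalMath {
  public static void main(String[] args) {
    long heartBeatInterval = 3_000L;                        // dfs.heartbeat.interval default: 3 s, in ms
    long oldDeleteReportInterval = 100 * heartBeatInterval; // the setting this patch removes
    System.out.println(oldDeleteReportInterval + " ms");    // prints 300000 ms, about 5 minutes
  }
}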


@@ -84,7 +84,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     @Override
     public SimulatedFSDataset newInstance(DataNode datanode,
         DataStorage storage, Configuration conf) throws IOException {
-      return new SimulatedFSDataset(storage, conf);
+      return new SimulatedFSDataset(datanode, storage, conf);
     }
     @Override
@@ -509,8 +509,15 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private final SimulatedStorage storage;
   private final SimulatedVolume volume;
   private final String datanodeUuid;
+  private final DataNode datanode;
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
+    this(null, storage, conf);
+  }
+  public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) {
+    this.datanode = datanode;
     if (storage != null) {
       for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
         storage.createStorageID(storage.getStorageDir(i), false);
@@ -737,6 +744,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       }
       storage.free(bpid, binfo.getNumBytes());
       map.remove(b);
+      if (datanode != null) {
+        datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
+            binfo.getStorageUuid());
+      }
     }
     if (error) {
       throw new IOException("Invalidate: Missing blocks.");
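The SimulatedFSDataset change gives the fake dataset an optional back-reference to its DataNode (the old two-argument constructor simply delegates with null), so invalidating a block raises the same deleted-block notification a real volume would. Below is a stripped-down, self-contained illustration of that callback pattern; NameNodeNotifier and FakeDataset are hypothetical stand-ins, not Hadoop classes:

import java.util.HashMap;
import java.util.Map;

// The owner is told about every deletion so it can send an immediate IBR.
interface NameNodeNotifier {
  void notifyNamenodeDeletedBlock(long blockId, String storageUuid);
}

class FakeDataset {
  private final Map<Long, String> blocks = new HashMap<>(); // blockId -> storageUuid
  private final NameNodeNotifier datanode;                  // may be null, mirroring the patch

  FakeDataset(NameNodeNotifier datanode) {
    this.datanode = datanode;
  }

  void addBlock(long blockId, String storageUuid) {
    blocks.put(blockId, storageUuid);
  }

  void invalidate(long blockId) {
    String storageUuid = blocks.remove(blockId);
    // As in the patch: notify only when the block existed and an owner was supplied.
    if (storageUuid != null && datanode != null) {
      datanode.notifyNamenodeDeletedBlock(blockId, storageUuid);
    }
  }
}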


@@ -159,8 +159,8 @@ public class TestIncrementalBlockReports {
           anyString(),
           any(StorageReceivedDeletedBlocks[].class));
-      // Trigger a block report, this also triggers an IBR.
-      DataNodeTestUtils.triggerBlockReport(singletonDn);
+      // Trigger a heartbeat, this also triggers an IBR.
+      DataNodeTestUtils.triggerHeartbeat(singletonDn);
       Thread.sleep(2000);
       // Ensure that the deleted block is reported.