diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 743a5c2b935..09bbfaa89b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -27,6 +27,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
     a lot of time if disks are busy (Rushabh S Shah via kihwal)
 
+    HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 10cce4545ae..3b4756cb377 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -82,12 +82,11 @@ class BPServiceActor implements Runnable {
 
   final BPOfferService bpos;
 
-  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // lastBlockReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also
   // assigned/read by the actor thread. Thus they should be declared as volatile
   // to make sure the "happens-before" consistency.
   volatile long lastBlockReport = 0;
-  volatile long lastDeletedReport = 0;
 
   boolean resetBlockReportTime = true;
 
@@ -417,10 +416,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
     synchronized (pendingIncrementalBRperStorage) {
-      lastDeletedReport = 0;
+      sendImmediateIBR = true;
       pendingIncrementalBRperStorage.notifyAll();
 
-      while (lastDeletedReport == 0) {
+      while (sendImmediateIBR) {
         try {
           pendingIncrementalBRperStorage.wait(100);
         } catch (InterruptedException e) {
@@ -465,7 +464,6 @@ class BPServiceActor implements Runnable {
     // or we will report an RBW replica after the BlockReport already reports
     // a FINALIZED one.
     reportReceivedDeletedBlocks();
-    lastDeletedReport = startTime;
 
     long brCreateStartTime = monotonicNow();
     Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
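Note on triggerDeletionReportForTests() above: the handshake now keys on the
existing volatile sendImmediateIBR flag instead of the removed
lastDeletedReport timestamp. Below is a minimal, self-contained sketch of that
wait/notify pattern; the class and method names are illustrative, not taken
from BPServiceActor.

  // Illustrative sketch only: a trigger thread raises a volatile flag and
  // blocks until the worker thread observes the flag and clears it.
  public class FlagHandshake {
    private final Object lock = new Object();
    private volatile boolean sendImmediate = false;

    // Test/trigger side: request an immediate action, wait for completion.
    public void triggerAndWait() throws InterruptedException {
      synchronized (lock) {
        sendImmediate = true;
        lock.notifyAll();
        while (sendImmediate) {   // worker clears the flag when done
          lock.wait(100);
        }
      }
    }

    // Worker side: perform the requested action, then acknowledge.
    public void acknowledge() {
      synchronized (lock) {
        sendImmediate = false;
        lock.notifyAll();
      }
    }
  }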
@@ -674,7 +672,6 @@ class BPServiceActor implements Runnable {
    */
   private void offerService() throws Exception {
     LOG.info("For namenode " + nnAddr + " using"
-        + " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
         + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
         + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
         + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -690,7 +687,9 @@ class BPServiceActor implements Runnable {
         //
         // Every so often, send heartbeat or block-report
         //
-        if (startTime - lastHeartbeat >= dnConf.heartBeatInterval) {
+        boolean sendHeartbeat =
+            startTime - lastHeartbeat >= dnConf.heartBeatInterval;
+        if (sendHeartbeat) {
           //
           // All heartbeat messages include following info:
           // -- Datanode name
@@ -729,10 +728,8 @@ class BPServiceActor implements Runnable {
             }
           }
         }
-        if (sendImmediateIBR ||
-            (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
+        if (sendImmediateIBR || sendHeartbeat) {
           reportReceivedDeletedBlocks();
-          lastDeletedReport = startTime;
         }
 
         List<DatanodeCommand> cmds = blockReport();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 67cd1ce1aae..3406f29f4c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -82,7 +82,6 @@ public class DNConf {
   final long heartBeatInterval;
   final long blockReportInterval;
   final long blockReportSplitThreshold;
-  final long deleteReportInterval;
   final long initialBlockReportDelay;
   final long cacheReportInterval;
   final long dfsclientSlowIoWarningThresholdMs;
@@ -164,7 +163,6 @@ public class DNConf {
     heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
-    this.deleteReportInterval = 100 * heartBeatInterval;
 
     // do we need to sync block file contents to disk when blockfile is closed?
     this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
         DFS_DATANODE_SYNCONCLOSE_DEFAULT);
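Taken together, the offerService() and DNConf hunks above replace the separate
deletion-report timer (hard-wired to 100 heartbeats) with the heartbeat
cadence itself: pending incremental block reports are flushed whenever a
heartbeat goes out, or immediately when sendImmediateIBR is set. A condensed
sketch of the new policy follows, with simplified names and stubbed RPC
bodies; it is not a drop-in for BPServiceActor.

  // Sketch of the revised flush policy; heartbeat and IBR bodies are stubs.
  class IbrFlushPolicy {
    volatile boolean sendImmediateIBR = false;
    long lastHeartbeat = 0;
    final long heartBeatInterval = 3000L;  // dfs.heartbeat.interval default, 3s

    void onLoopIteration(long now) {
      boolean sendHeartbeat = now - lastHeartbeat >= heartBeatInterval;
      if (sendHeartbeat) {
        lastHeartbeat = now;
        // ... send heartbeat RPC ...
      }
      // Old gate: sendImmediateIBR
      //           || now - lastDeletedReport > 100 * heartBeatInterval.
      // New gate: every heartbeat also flushes pending deletion acks, so an
      // IBR delete ack now waits at most one heartbeat interval.
      if (sendImmediateIBR || sendHeartbeat) {
        // ... reportReceivedDeletedBlocks() ...
        sendImmediateIBR = false;
      }
    }
  }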
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 5c7b4ac41fb..23fc95bf3c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -84,7 +84,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   @Override
   public SimulatedFSDataset newInstance(DataNode datanode,
       DataStorage storage, Configuration conf) throws IOException {
-    return new SimulatedFSDataset(storage, conf);
+    return new SimulatedFSDataset(datanode, storage, conf);
   }
 
   @Override
@@ -509,8 +509,15 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private final SimulatedStorage storage;
   private final SimulatedVolume volume;
   private final String datanodeUuid;
+  private final DataNode datanode;
 
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
+    this(null, storage, conf);
+  }
+
+  public SimulatedFSDataset(DataNode datanode, DataStorage storage,
+      Configuration conf) {
+    this.datanode = datanode;
     if (storage != null) {
       for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
         storage.createStorageID(storage.getStorageDir(i), false);
@@ -737,6 +744,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       }
       storage.free(bpid, binfo.getNumBytes());
       map.remove(b);
+      if (datanode != null) {
+        datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
+            binfo.getStorageUuid());
+      }
     }
     if (error) {
       throw new IOException("Invalidate: Missing blocks.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
index b5aa93f6e64..cd2c1254245 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
@@ -159,8 +159,8 @@ public class TestIncrementalBlockReports {
         anyString(),
         any(StorageReceivedDeletedBlocks[].class));
 
-    // Trigger a block report, this also triggers an IBR.
-    DataNodeTestUtils.triggerBlockReport(singletonDn);
+    // Trigger a heartbeat, this also triggers an IBR.
+    DataNodeTestUtils.triggerHeartbeat(singletonDn);
     Thread.sleep(2000);
 
     // Ensure that the deleted block is reported.
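With deletions now reported through SimulatedFSDataset and flushed on the
heartbeat path, the test verifies the deletion IBR by triggering a heartbeat
rather than a block report. A sketch of the verification pattern, assuming
the nnSpy Mockito spy and singletonDn DataNode set up earlier in
TestIncrementalBlockReports:

  // Sketch of the assertion pattern (assumes nnSpy is a Mockito spy on the
  // NameNode's DatanodeProtocol and singletonDn is the test's DataNode).
  DataNodeTestUtils.triggerHeartbeat(singletonDn);  // flushes the pending IBR
  Thread.sleep(2000);                               // allow the RPC to land
  Mockito.verify(nnSpy, times(1)).blockReceivedAndDeleted(
      any(DatanodeRegistration.class),
      anyString(),
      any(StorageReceivedDeletedBlocks[].class));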