diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ce3a9765a8..9a0ecbea52e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -761,6 +761,9 @@ Release 0.23.0 - Unreleased
 
     HDFS-2118. Couple dfs data dir improvements. (eli)
 
+    HDFS-2500. Avoid file system operations in BPOfferService thread while
+    processing deletes. (todd)
+
   BUG FIXES
 
     HDFS-2344. Fix the TestOfflineEditsViewer test failure in 0.23 branch.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index eb953ab7eb2..3f773360899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1108,8 +1108,15 @@ public class DataNode extends Configured
           if (!heartbeatsDisabledForTests) {
             DatanodeCommand[] cmds = sendHeartBeat();
             metrics.addHeartbeat(now() - startTime);
+
+            long startProcessCommands = now();
             if (!processCommand(cmds))
               continue;
+            long endProcessCommands = now();
+            if (endProcessCommands - startProcessCommands > 2000) {
+              LOG.info("Took " + (endProcessCommands - startProcessCommands) +
+                  "ms to process " + cmds.length + " commands from NN");
+            }
           }
         }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 08b40fed2aa..53c55259084 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -2088,10 +2088,9 @@ public class FSDataset implements FSDatasetInterface {
         volumeMap.remove(bpid, invalidBlks[i]);
       }
       File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
-      long dfsBytes = f.length() + metaFile.length();
 
       // Delete the block asynchronously to make sure we can do it fast enough
-      asyncDiskService.deleteAsync(v, bpid, f, metaFile, dfsBytes,
+      asyncDiskService.deleteAsync(v, bpid, f, metaFile,
           invalidBlks[i].toString());
     }
     if (error) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
index 176f8825d9f..0c2523b8341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
@@ -148,11 +148,11 @@ class FSDatasetAsyncDiskService {
    * dfsUsed statistics accordingly.
    */
   void deleteAsync(FSDataset.FSVolume volume, String bpid, File blockFile,
-      File metaFile, long dfsBytes, String blockName) {
+      File metaFile, String blockName) {
     DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile
         + " for deletion");
     ReplicaFileDeleteTask deletionTask =
-        new ReplicaFileDeleteTask(volume, bpid, blockFile, metaFile, dfsBytes,
+        new ReplicaFileDeleteTask(volume, bpid, blockFile, metaFile,
             blockName);
     execute(volume.getCurrentDir(), deletionTask);
   }
@@ -165,16 +165,14 @@ class FSDatasetAsyncDiskService {
     final String blockPoolId;
     final File blockFile;
     final File metaFile;
-    final long dfsBytes;
     final String blockName;
 
     ReplicaFileDeleteTask(FSDataset.FSVolume volume, String bpid,
-        File blockFile, File metaFile, long dfsBytes, String blockName) {
+        File blockFile, File metaFile, String blockName) {
       this.volume = volume;
       this.blockPoolId = bpid;
       this.blockFile = blockFile;
       this.metaFile = metaFile;
-      this.dfsBytes = dfsBytes;
       this.blockName = blockName;
     }
 
@@ -192,6 +190,7 @@ class FSDatasetAsyncDiskService {
 
     @Override
     public void run() {
+      long dfsBytes = blockFile.length() + metaFile.length();
       if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
         DataNode.LOG.warn("Unexpected error trying to delete block "
             + blockPoolId + " " + blockName + " at file " + blockFile
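The DataNode.java hunk adds a timing guard around processCommand() so that unusually slow command processing (more than 2000ms) gets logged without adding noise on the fast path. A minimal, self-contained sketch of that pattern, assuming a simulated workload in place of the real processCommand(cmds) call (the class name and the sleep are illustrative, not part of the patch):

```java
// Sketch of the elapsed-time guard added in the DataNode heartbeat loop.
// TimedCommandProcessing and Thread.sleep() are stand-ins; the real code
// times processCommand(cmds) and logs through DataNode.LOG.
public class TimedCommandProcessing {
  private static final long LOG_THRESHOLD_MS = 2000; // threshold from the patch

  public static void main(String[] args) throws InterruptedException {
    long startProcessCommands = System.currentTimeMillis();
    Thread.sleep(2500); // stand-in for processCommand(cmds)
    long endProcessCommands = System.currentTimeMillis();
    if (endProcessCommands - startProcessCommands > LOG_THRESHOLD_MS) {
      System.out.println("Took " + (endProcessCommands - startProcessCommands)
          + "ms to process commands from NN");
    }
  }
}
```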
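The FSDataset and FSDatasetAsyncDiskService hunks carry the actual fix: f.length() and metaFile.length() are stat() calls that can block on a slow or failing disk, and they previously ran in the BPOfferService (heartbeat) thread before the deletion was handed off. Moving the computation into ReplicaFileDeleteTask.run() keeps the service thread free of file system operations. Below is a minimal sketch of the resulting shape, using a hypothetical AsyncBlockDeleter with simplified accounting (the real code decrements per-volume dfsUsed through FSVolume and schedules onto a per-volume executor):

```java
import java.io.File;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of the pattern the patch applies: the caller thread
// only schedules the delete, and the length() calls (file system stats)
// happen on the worker thread, next to the delete() calls themselves.
public class AsyncBlockDeleter {
  private final ExecutorService executor = Executors.newSingleThreadExecutor();
  private final AtomicLong dfsUsed = new AtomicLong(); // simplified accounting

  /** Called from the service thread; performs no disk I/O itself. */
  public void deleteAsync(final File blockFile, final File metaFile) {
    executor.execute(new Runnable() {
      @Override
      public void run() {
        // Stat the files here, on the async worker, not on the caller.
        long dfsBytes = blockFile.length() + metaFile.length();
        if (blockFile.delete() && (metaFile.delete() || !metaFile.exists())) {
          dfsUsed.addAndGet(-dfsBytes);
        }
      }
    });
  }
}
```

The real deleteAsync() also logs the scheduling and routes the task through execute(volume.getCurrentDir(), deletionTask) so each volume gets its own deletion queue; the sketch collapses that to a single executor for brevity.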