HDFS-2500. Avoid file system operations in BPOfferService thread while processing deletes. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1190072 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2011-10-27 22:47:28 +00:00
parent cc5a75b521
commit 66de48a353
4 changed files with 15 additions and 7 deletions
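The core of the change is visible in the FSDataset and FSDatasetAsyncDiskService hunks below: the dfsBytes computation (two File.length() calls) moves out of the caller and into ReplicaFileDeleteTask.run(), so the heartbeat/command-processing thread never blocks on disk I/O while scheduling deletes. A minimal, self-contained sketch of that pattern follows; the class and method names here are hypothetical, not the actual HDFS code.

// Sketch only: defer blocking file-system calls (File.length()) from the
// scheduling thread into the asynchronously executed deletion task.
import java.io.File;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncDeleteSketch {
  private final ExecutorService deletionExecutor =
      Executors.newSingleThreadExecutor();

  // Called from a latency-sensitive thread (e.g. a heartbeat loop): it only
  // schedules the work and never touches the disk itself.
  void deleteAsync(final File blockFile, final File metaFile) {
    deletionExecutor.execute(new Runnable() {
      @Override
      public void run() {
        // File.length() can block on a slow or failing disk, so it now runs
        // on the deletion thread instead of the caller's thread.
        long dfsBytes = blockFile.length() + metaFile.length();
        if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
          System.err.println("Unexpected error deleting " + blockFile);
        } else {
          System.out.println("Deleted " + blockFile + " and " + metaFile
              + ", freeing " + dfsBytes + " bytes");
        }
      }
    });
  }

  void shutdown() {
    deletionExecutor.shutdown();
  }

  public static void main(String[] args) {
    AsyncDeleteSketch sketch = new AsyncDeleteSketch();
    sketch.deleteAsync(new File("/tmp/blk_123"), new File("/tmp/blk_123.meta"));
    sketch.shutdown();
  }
}

The timing log added to offerService() in the DataNode hunk serves the same end: it makes it visible when processing NameNode commands (which includes scheduling these deletes) takes more than two seconds.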

CHANGES.txt

@@ -761,6 +761,9 @@ Release 0.23.0 - Unreleased
     HDFS-2118. Couple dfs data dir improvements. (eli)
+
+    HDFS-2500. Avoid file system operations in BPOfferService thread while
+    processing deletes. (todd)
 
   BUG FIXES
 
     HDFS-2344. Fix the TestOfflineEditsViewer test failure in 0.23 branch.

DataNode.java

@@ -1108,8 +1108,15 @@ private void offerService() throws Exception {
           if (!heartbeatsDisabledForTests) {
             DatanodeCommand[] cmds = sendHeartBeat();
             metrics.addHeartbeat(now() - startTime);
+
+            long startProcessCommands = now();
             if (!processCommand(cmds))
               continue;
+            long endProcessCommands = now();
+            if (endProcessCommands - startProcessCommands > 2000) {
+              LOG.info("Took " + (endProcessCommands - startProcessCommands) +
+                  "ms to process " + cmds.length + " commands from NN");
+            }
           }
         }

FSDataset.java

@@ -2088,10 +2088,9 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
         volumeMap.remove(bpid, invalidBlks[i]);
       }
       File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
-      long dfsBytes = f.length() + metaFile.length();
       // Delete the block asynchronously to make sure we can do it fast enough
-      asyncDiskService.deleteAsync(v, bpid, f, metaFile, dfsBytes,
+      asyncDiskService.deleteAsync(v, bpid, f, metaFile,
           invalidBlks[i].toString());
     }
     if (error) {

FSDatasetAsyncDiskService.java

@@ -148,11 +148,11 @@ synchronized void shutdown() {
    * dfsUsed statistics accordingly.
    */
   void deleteAsync(FSDataset.FSVolume volume, String bpid, File blockFile,
-      File metaFile, long dfsBytes, String blockName) {
+      File metaFile, String blockName) {
     DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile
         + " for deletion");
     ReplicaFileDeleteTask deletionTask =
-        new ReplicaFileDeleteTask(volume, bpid, blockFile, metaFile, dfsBytes,
+        new ReplicaFileDeleteTask(volume, bpid, blockFile, metaFile,
             blockName);
     execute(volume.getCurrentDir(), deletionTask);
   }
@@ -165,16 +165,14 @@ static class ReplicaFileDeleteTask implements Runnable {
     final String blockPoolId;
     final File blockFile;
     final File metaFile;
-    final long dfsBytes;
     final String blockName;
 
     ReplicaFileDeleteTask(FSDataset.FSVolume volume, String bpid,
-        File blockFile, File metaFile, long dfsBytes, String blockName) {
+        File blockFile, File metaFile, String blockName) {
       this.volume = volume;
       this.blockPoolId = bpid;
       this.blockFile = blockFile;
       this.metaFile = metaFile;
-      this.dfsBytes = dfsBytes;
       this.blockName = blockName;
     }
@@ -192,6 +190,7 @@ public String toString() {
     @Override
     public void run() {
+      long dfsBytes = blockFile.length() + metaFile.length();
       if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
         DataNode.LOG.warn("Unexpected error trying to delete block "
             + blockPoolId + " " + blockName + " at file " + blockFile