From 553efd719061e8fee98f91ed1b766d2e77c78c9c Mon Sep 17 00:00:00 2001
From: yliu
Date: Thu, 19 Mar 2015 23:24:55 +0800
Subject: [PATCH] HDFS-7930. commitBlockSynchronization() does not remove
 locations. (yliu)

(cherry picked from commit 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

(cherry picked from commit 2c9a7461ec2ceba5885e95bc79f8dcbfd198df60)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 +
 .../server/blockmanagement/BlockManager.java  | 41 +++++++++++++++++++
 .../hdfs/server/namenode/FSNamesystem.java    |  8 +++-
 3 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ff335ab72c9..904d02d3df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -100,6 +100,8 @@ Release 2.6.1 - UNRELEASED
     HDFS-7929. inotify unable fetch pre-upgrade edit log segments once
     upgrade starts (Zhe Zhang via Colin P. McCabe)
 
+    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d26cc524a7b..5a3835118dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1930,6 +1930,47 @@ public class BlockManager {
     return toInvalidate;
   }
 
+  /**
+   * Mark block replicas as corrupt except those on the storages in
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfo block,
+      long oldGenerationStamp, long oldNumBytes,
+      DatanodeStorageInfo[] newStorages) throws IOException {
+    assert namesystem.hasWriteLock();
+    BlockToMarkCorrupt b = null;
+    if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+    } else {
+      return;
+    }
+
+    for (DatanodeStorageInfo storage : getStorages(block)) {
+      boolean isCorrupt = true;
+      if (newStorages != null) {
+        for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage!= null && storage.equals(newStorage)) {
+            isCorrupt = false;
+            break;
+          }
+        }
+      }
+      if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica"
+            + b + " on " + storage.getDatanodeDescriptor()
+            + " as corrupt because the dn is not in the new committed "
+            + "storage list.");
+        markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+      }
+    }
+  }
+
   /**
    * processFirstBlockReport is intended only for processing "initial" block
    * reports, the first block report received from a DN after it registers.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c92b431b697..fa5298139e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4791,6 +4791,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
          throw new IOException("Block (=" + lastblock + ") not found");
        }
      }
+      final long oldGenerationStamp = storedBlock.getGenerationStamp();
+      final long oldNumBytes = storedBlock.getNumBytes();
      //
      // The implementation of delete operation (see @deleteInternal method)
      // first removes the file paths from namespace, and delays the removal
@@ -4845,8 +4847,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        storedBlock.setNumBytes(newlength);
 
        // find the DatanodeDescriptor objects
-        // There should be no locations in the blockManager till now because the
-        // file is underConstruction
        ArrayList<DatanodeDescriptor> trimmedTargets =
            new ArrayList<DatanodeDescriptor>(newtargets.length);
        ArrayList<String> trimmedStorages =
@@ -4883,6 +4883,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
                trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
                trimmedStorages.toArray(new String[trimmedStorages.size()]));
        iFile.setLastBlock(storedBlock, trimmedStorageInfos);
+        if (closeFile) {
+          blockManager.markBlockReplicasAsCorrupt(storedBlock,
+              oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+        }
      }
 
      if (closeFile) {