From 5758018f1b9d876a1e13c47a4eb30324f4d717b1 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Mon, 22 Jul 2013 18:27:55 +0000
Subject: [PATCH] HDFS-5008. Merge change r1505761 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1505764 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java  |  5 ++--
 .../hdfs/server/namenode/FSDirectory.java     | 15 ++++++++----
 .../hdfs/server/namenode/FSEditLogLoader.java | 15 +++++++-----
 .../hdfs/server/namenode/FSNamesystem.java    | 13 ++++++++--
 .../namenode/INodeFileUnderConstruction.java  |  9 +++----
 .../apache/hadoop/hdfs/TestAbandonBlock.java  | 24 ++++++++++++-------
 7 files changed, 55 insertions(+), 28 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d23fc9f9200..b100bf45f3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -238,6 +238,8 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-5007. Replace hard-coded property keys with DFSConfigKeys fields.
     (Kousuke Saruta via jing9)
 
+    HDFS-5008. Make ClientProtocol#abandonBlock() idempotent. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. (atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 1958b51f529..d77cde5819e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -274,8 +274,8 @@ public void setOwner(String src, String username, String groupname)
 
   /**
    * The client can give up on a block by calling abandonBlock().
-   * The client can then
-   * either obtain a new block, or complete or abandon the file.
+   * The client can then either obtain a new block, or complete or abandon the
+   * file.
    * Any partial writes to the block will be discarded.
    * 
    * @throws AccessControlException If access is denied
@@ -283,6 +283,7 @@ public void setOwner(String src, String username, String groupname)
    * @throws UnresolvedLinkException If src contains a symlink
    * @throws IOException If an I/O error occurred
    */
+  @Idempotent
   public void abandonBlock(ExtendedBlock b, String src, String holder)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 3c943ebf747..9ba3b6b111c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -418,23 +418,27 @@ void closeFile(String path, INodeFile file) {
 
   /**
    * Remove a block from the file.
+   * @return Whether the block exists in the corresponding file
    */
-  void removeBlock(String path, INodeFileUnderConstruction fileNode,
+  boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
       Block block) throws IOException {
     waitForReady();
 
     writeLock();
     try {
-      unprotectedRemoveBlock(path, fileNode, block);
+      return unprotectedRemoveBlock(path, fileNode, block);
     } finally {
       writeUnlock();
     }
   }
 
-  void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
-      Block block) throws IOException {
+  boolean unprotectedRemoveBlock(String path,
+      INodeFileUnderConstruction fileNode, Block block) throws IOException {
     // modify file-> block and blocksMap
-    fileNode.removeLastBlock(block);
+    boolean removed = fileNode.removeLastBlock(block);
+    if (!removed) {
+      return false;
+    }
     getBlockManager().removeBlockFromMap(block);
 
     if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -446,6 +450,7 @@ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
     // update space consumed
     final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
     updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
+    return true;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 61bc8bb53c2..642431abaa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.LogHeaderCorruptException;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
@@ -57,6 +58,8 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
@@ -66,9 +69,6 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
@@ -76,7 +76,6 @@
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.Holder;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Joiner;
 
@@ -661,8 +660,12 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
         throw new IOException("Trying to remove more than one block from file "
             + path);
       }
-      fsDir.unprotectedRemoveBlock(path,
-          (INodeFileUnderConstruction)file, oldBlocks[oldBlocks.length - 1]);
+      Block oldBlock = oldBlocks[oldBlocks.length - 1];
+      boolean removed = fsDir.unprotectedRemoveBlock(path,
+          (INodeFileUnderConstruction) file, oldBlock);
+      if (!removed && !(op instanceof UpdateBlocksOp)) {
+        throw new IOException("Trying to delete non-existent block " + oldBlock);
+      }
     } else if (newBlocks.length > oldBlocks.length) {
       // We're adding blocks
       for (int i = oldBlocks.length; i < newBlocks.length; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7269db3322b..dfb466f8c1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2561,7 +2561,11 @@ boolean abandonBlock(ExtendedBlock b, String src, String holder)
       // Remove the block from the pending creates list
       //
       INodeFileUnderConstruction file = checkLease(src, holder);
-      dir.removeBlock(src, file, ExtendedBlock.getLocalBlock(b));
+      boolean removed = dir.removeBlock(src, file,
+          ExtendedBlock.getLocalBlock(b));
+      if (!removed) {
+        return true;
+      }
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
             + " is removed from pendingCreates");
@@ -3536,7 +3540,12 @@ void commitBlockSynchronization(ExtendedBlock lastblock,
       INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)iFile;
 
       if (deleteblock) {
-        pendingFile.removeLastBlock(ExtendedBlock.getLocalBlock(lastblock));
+        Block blockToDel = ExtendedBlock.getLocalBlock(lastblock);
+        boolean remove = pendingFile.removeLastBlock(blockToDel);
+        if (!remove) {
+          throw new IOException("Trying to delete non-existent block "
+              + blockToDel);
+        }
         blockManager.removeBlockFromMap(storedBlock);
       }
       else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 15f812b0323..9dd38ff320b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -157,20 +157,21 @@ protected void assertAllBlocksComplete() {
 
   /**
    * Remove a block from the block list. This block should be
    * the last one on the list.
   */
-  void removeLastBlock(Block oldblock) throws IOException {
+  boolean removeLastBlock(Block oldblock) throws IOException {
     final BlockInfo[] blocks = getBlocks();
-    if (blocks == null) {
-      throw new IOException("Trying to delete non-existant block " + oldblock);
+    if (blocks == null || blocks.length == 0) {
+      return false;
     }
     int size_1 = blocks.length - 1;
     if (!blocks[size_1].equals(oldblock)) {
-      throw new IOException("Trying to delete non-last block " + oldblock);
+      return false;
     }
 
     //copy to a new list
     BlockInfo[] newlist = new BlockInfo[size_1];
     System.arraycopy(blocks, 0, newlist, 0, size_1);
     setBlocks(newlist);
+    return true;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
index 9590bc3cf9f..f5c3b4c1c1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -21,11 +21,12 @@
 
 import java.io.IOException;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -45,7 +46,7 @@ public class TestAbandonBlock {
   static final String FILE_NAME_PREFIX
       = "/" + TestAbandonBlock.class.getSimpleName() + "_";
   private MiniDFSCluster cluster;
-  private FileSystem fs;
+  private DistributedFileSystem fs;
 
   @Before
   public void setUp() throws Exception {
@@ -73,29 +74,34 @@ public void testAbandonBlock() throws IOException {
     fout.hflush();
 
     // Now abandon the last block
-    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
     LocatedBlocks blocks =
       dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
     int orginalNumBlocks = blocks.locatedBlockCount();
     LocatedBlock b = blocks.getLastLocatedBlock();
-    dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);
+    dfsclient.getNamenode().abandonBlock(b.getBlock(), src,
+        dfsclient.clientName);
+
+    // call abandonBlock again to make sure the operation is idempotent
+    dfsclient.getNamenode().abandonBlock(b.getBlock(), src,
+        dfsclient.clientName);
 
     // And close the file
     fout.close();
 
     // Close cluster and check the block has been abandoned after restart
    cluster.restartNameNode();
-    blocks = dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
-    assert orginalNumBlocks == blocks.locatedBlockCount() + 1 :
-      "Blocks " + b + " has not been abandoned.";
+    blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
+        Integer.MAX_VALUE);
+    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
+        orginalNumBlocks, blocks.locatedBlockCount() + 1);
   }
 
   @Test
   /** Make sure that the quota is decremented correctly when a block is abandoned */
   public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
-    DistributedFileSystem dfs = (DistributedFileSystem)fs;
     // Setting diskspace quota to 3MB
-    dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
+    fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
 
     // Start writing a file with 2 replicas to ensure each datanode has one.
     // Block Size is 1MB.
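
The idempotency matters when an RPC reply is lost and the client retries: before this patch, a retried abandonBlock() failed with "Trying to delete non-existant block" because the first attempt had already removed the block; with it, the duplicate call simply returns success. A minimal client-side sketch of the retry pattern this protects (the helper class, abandonWithRetry, and the MAX_RETRIES policy below are hypothetical illustrations, not part of this patch; ClientProtocol and ExtendedBlock are the real types):

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

class AbandonBlockRetrySketch {
  // Hypothetical retry budget; real clients delegate to the RPC retry policy.
  private static final int MAX_RETRIES = 3;

  /**
   * Abandon a block, retrying on transient failures such as an RPC timeout
   * where the NameNode applied the call but the reply was lost. Because
   * abandonBlock() is marked @Idempotent above, the duplicate attempt is a
   * harmless no-op rather than an IOException.
   */
  static void abandonWithRetry(ClientProtocol namenode, ExtendedBlock b,
      String src, String holder) throws IOException {
    for (int attempt = 0; ; attempt++) {
      try {
        namenode.abandonBlock(b, src, holder);
        return;
      } catch (IOException e) {
        if (attempt >= MAX_RETRIES) {
          throw e;
        }
        // transient failure: loop and retry the idempotent call
      }
    }
  }
}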