diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3f4c4b877c4..de54f0a6d94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -140,6 +140,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5427. Not able to read deleted files from snapshot directly under
     snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
 
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file
+    that is included in a snapshot. (jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index fc59acf10a6..243f7530249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 9dd38ff320b..9a8934e2e08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
@@ -130,6 +132,41 @@ public class INodeFileUnderConstruction extends INodeFile implements MutableBloc
     return f;
   }
 
+  @Override
+  public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
+    if (snapshot == null && prior != null) {
+      // the file is being deleted from the current tree but is still
+      // recorded in a prior snapshot
+      cleanZeroSizeBlock(collectedBlocks);
+      return Counts.newInstance();
+    } else {
+      return super.cleanSubtree(snapshot, prior, collectedBlocks,
+          removedINodes, countDiffChange);
+    }
+  }
+
+  /**
+   * When deleting a file in the current fs directory that is also contained
+   * in a snapshot, we should delete the last block if it is under
+   * construction and its size is 0.
+   */
+  private void cleanZeroSizeBlock(final BlocksMapUpdateInfo collectedBlocks) {
+    final BlockInfo[] blocks = getBlocks();
+    if (blocks != null && blocks.length > 0
+        && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
+      BlockInfoUnderConstruction lastUC =
+          (BlockInfoUnderConstruction) blocks[blocks.length - 1];
+      if (lastUC.getNumBytes() == 0) {
+        // this is a 0-sized block; no need to check its UC state here
+        collectedBlocks.addDeleteBlock(lastUC);
+        removeLastBlock(lastUC);
+      }
+    }
+  }
+
   @Override
   public INodeFileUnderConstruction recordModification(final Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
@@ -157,7 +194,7 @@ public class INodeFileUnderConstruction extends INodeFile implements MutableBloc
    * Remove a block from the block list. This block should be
    * the last one on the list.
    */
-  boolean removeLastBlock(Block oldblock) throws IOException {
+  boolean removeLastBlock(Block oldblock) {
     final BlockInfo[] blocks = getBlocks();
     if (blocks == null || blocks.length == 0) {
       return false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index 9bf8c434fb7..99a8a8dc2f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -274,4 +275,80 @@ public class TestSnapshotBlocksMap {
         "s2/bar");
     DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
   }
+
+  /**
+   * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot.
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    // directly ask the NameNode to allocate a new block for the appending
+    // client, leaving a 0-sized block under construction as the last block
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(bar, true);
+    // the file in snapshot s1 should now only have one block: the 0-sized
+    // last block must have been removed
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
+        bar.getName());
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
+
+  /** Make sure we delete the 0-sized block when deleting an INodeFileUC */
+  @Test
+  public void testDeletionWithZeroSizeBlock2() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(subDir, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
 }