diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 779ae132b86..838497c8cd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -33,9 +33,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
@@ -1051,6 +1053,18 @@ public abstract class INode implements INodeAttributes, Diff.Element {
       assert toDelete != null : "toDelete is null";
       toDelete.delete();
       toDeleteList.add(toDelete);
+      // If the file is being truncated, the copy-on-truncate block
+      // should also be collected for deletion.
+      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
+      if (uc == null) {
+        return;
+      }
+      Block truncateBlock = uc.getTruncateBlock();
+      if (truncateBlock == null || truncateBlock.equals(toDelete)) {
+        return;
+      }
+      assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
+      addDeleteBlock((BlockInfo) truncateBlock);
     }
 
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index f97939a7f81..292eefd3ec8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -1153,6 +1154,46 @@ public class TestFileTruncate {
     fs.delete(parent, true);
   }
 
+  /**
+   * While a rolling upgrade is in progress, the test truncates a file
+   * so that copy-on-truncate is triggered, then deletes the file and
+   * verifies that no blocks involved in the truncate are left behind.
+   */
+  @Test
+  public void testTruncateWithRollingUpgrade() throws Exception {
+    final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    // Start the rolling upgrade.
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
+    assertEquals("could not prepare for rolling upgrade", 0, status);
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    Path dir = new Path("/testTruncateWithRollingUpgrade");
+    fs.mkdirs(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[3];
+    ThreadLocalRandom.current().nextBytes(data);
+    writeContents(data, data.length, p);
+
+    assertEquals("block num should be 1", 1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+
+    final boolean isReady = fs.truncate(p, 2);
+    assertFalse("should be copy-on-truncate", isReady);
+    assertEquals("block num should be 2", 2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    fs.delete(p, true);
+
+    assertEquals("block num should be 0", 0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
+    assertEquals("could not finalize rolling upgrade", 0, status);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
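
Reviewer note: the sketch below condenses the scenario the new test exercises into a standalone program, in case that helps when verifying the fix locally. It is a best-effort illustration, not part of the patch: the class name TruncateLeakSketch and the file path are invented, and only public test APIs (MiniDFSCluster, DistributedFileSystem.truncate, BlockManager.getTotalBlocks) are assumed. Without the INode.java change the copy-on-truncate block is never collected, so the final block count stays at 1; with it, the count returns to 0.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class TruncateLeakSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));

      // Put a rolling upgrade in progress; this forces the NameNode to
      // choose copy-on-truncate instead of truncating in place.
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      Path p = new Path("/truncate-leak-sketch");
      try (FSDataOutputStream out = dfs.create(p)) {
        out.write(new byte[3]);
      }

      // Copy-on-truncate: truncate() returns false and a replacement
      // block is allocated, so the block count briefly goes from 1 to 2.
      boolean isReady = dfs.truncate(p, 2);
      dfs.delete(p, true);

      // Before the fix the replacement block leaked and this printed 1;
      // with the fix the count drops back to 0.
      long total = cluster.getNamesystem().getFSDirectory()
          .getBlockManager().getTotalBlocks();
      System.out.println("isReady=" + isReady + ", totalBlocks=" + total);

      dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
    } finally {
      cluster.shutdown();
    }
  }
}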