diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 47f25f6f1fb..2dfb90ee672 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -61,10 +61,10 @@ class FSDirDeleteOp {
             removedUCFiles);
         if (unprotectedDelete(fsd, iip, context, mtime)) {
           filesRemoved = context.quotaDelta().getNsDelta();
+          fsn.removeSnapshottableDirs(snapshottableDirs);
         }
         fsd.updateReplicationFactor(context.collectedBlocks()
             .toUpdateReplicationInfo());
-        fsn.removeSnapshottableDirs(snapshottableDirs);
         fsd.updateCount(iip, context.quotaDelta(), false);
       }
     } finally {
@@ -144,9 +144,9 @@ class FSDirDeleteOp {
         new ReclaimContext(fsd.getBlockStoragePolicySuite(),
            collectedBlocks, removedINodes, removedUCFiles),
        mtime);
-    fsn.removeSnapshottableDirs(snapshottableDirs);
 
     if (filesRemoved) {
+      fsn.removeSnapshottableDirs(snapshottableDirs);
       fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
       fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index cdb57f240fa..9d32528bf25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -66,6 +66,8 @@ import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
 import static org.junit.Assert.assertNotEquals;
 
 /**
@@ -492,4 +494,48 @@ public class TestDeleteRace {
       }
     }
   }
+
+  /**
+   * Test getting a snapshot diff on a directory whose delete failed.
+   */
+  @Test
+  public void testDeleteOnSnapshottableDir() throws Exception {
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
+        true);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      FSNamesystem fsn = cluster.getNamesystem();
+      FSDirectory fsdir = fsn.getFSDirectory();
+      Path dir = new Path("/dir");
+      hdfs.mkdirs(dir);
+      hdfs.allowSnapshot(dir);
+      Path file1 = new Path(dir, "file1");
+      Path file2 = new Path(dir, "file2");
+
+      // make removeLastINode() fail so the directory does not get deleted
+      FSDirectory fsdir2 = Mockito.spy(fsdir);
+      cluster.getNamesystem().setFSDirectory(fsdir2);
+      Mockito.doReturn(-1L).when(fsdir2).removeLastINode(any());
+      hdfs.delete(dir, true);
+
+      // create files and create snapshots
+      DFSTestUtil.createFile(hdfs, file1, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s1");
+      DFSTestUtil.createFile(hdfs, file2, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s2");
+
+      // should be able to get a snapshot diff on a descendant of the snapshot root
+      Path dirDir1 = new Path(dir, "dir1");
+      hdfs.mkdirs(dirDir1);
+      hdfs.getSnapshotDiffReport(dirDir1, "s2", "s1");
+      assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }