HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina

Surendra Singh Lilhore committed 2020-05-13 15:01:07 +05:30
parent 450e5aa9dd
commit 743c2e9071
2 changed files with 48 additions and 2 deletions
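
Summary of the fix: FSDirDeleteOp used to call fsn.removeSnapshottableDirs(snapshottableDirs) unconditionally, so a delete that failed still unregistered the directory from the SnapshotManager's snapshottable list. Both hunks below move that call inside the branch that only runs when the delete actually removed inodes, and the guard is applied on both delete paths: delete() for client RPCs and deleteForEditLog() for edit-log replay. The following is a minimal, self-contained sketch of the guarded pattern; every name in it (SnapshotRegistrySketch, deleteSubtree, and so on) is a hypothetical illustration, not an HDFS API:

    import java.util.HashSet;
    import java.util.Set;

    public class SnapshotRegistrySketch {
      private final Set<String> snapshottableDirs = new HashSet<>();

      void allowSnapshot(String dir) {
        snapshottableDirs.add(dir);
      }

      // Stand-in for the low-level subtree removal; returns false on failure.
      private boolean deleteSubtree(String dir, boolean failDelete) {
        return !failDelete;
      }

      // The pre-fix behaviour would call snapshottableDirs.remove(dir)
      // unconditionally; the fix guards it with the success check.
      boolean delete(String dir, boolean failDelete) {
        boolean filesRemoved = deleteSubtree(dir, failDelete);
        if (filesRemoved) {
          // Only a successful delete may drop snapshottable status.
          snapshottableDirs.remove(dir);
        }
        return filesRemoved;
      }

      public static void main(String[] args) {
        SnapshotRegistrySketch fs = new SnapshotRegistrySketch();
        fs.allowSnapshot("/dir");
        boolean deleted = fs.delete("/dir", true); // forced failure
        // The delete failed, so "/dir" must still be snapshottable.
        System.out.println(deleted + " " + fs.snapshottableDirs.contains("/dir"));
        // prints: false true
      }
    }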

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -61,10 +61,10 @@ static long delete(FSDirectory fsd, INodesInPath iip,
             removedUCFiles);
         if (unprotectedDelete(fsd, iip, context, mtime)) {
           filesRemoved = context.quotaDelta().getNsDelta();
+          fsn.removeSnapshottableDirs(snapshottableDirs);
         }
         fsd.updateReplicationFactor(context.collectedBlocks()
             .toUpdateReplicationInfo());
-        fsn.removeSnapshottableDirs(snapshottableDirs);
         fsd.updateCount(iip, context.quotaDelta(), false);
       }
     } finally {
@@ -144,9 +144,9 @@ static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
         new ReclaimContext(fsd.getBlockStoragePolicySuite(),
             collectedBlocks, removedINodes, removedUCFiles),
         mtime);
-    fsn.removeSnapshottableDirs(snapshottableDirs);
     if (filesRemoved) {
+      fsn.removeSnapshottableDirs(snapshottableDirs);
       fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
       fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
     }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -69,6 +69,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
 import static org.junit.Assert.assertNotEquals;
 
 /**
@@ -513,4 +515,48 @@ public void testOpenRenameRace() throws Exception {
       }
     }
   }
+
+  /**
+   * Test getting a snapshot diff on a directory whose delete failed.
+   */
+  @Test
+  public void testDeleteOnSnapshottableDir() throws Exception {
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
+        true);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      FSNamesystem fsn = cluster.getNamesystem();
+      FSDirectory fsdir = fsn.getFSDirectory();
+      Path dir = new Path("/dir");
+      hdfs.mkdirs(dir);
+      hdfs.allowSnapshot(dir);
+      Path file1 = new Path(dir, "file1");
+      Path file2 = new Path(dir, "file2");
+      // fail removeLastINode() so the directory does not get deleted
+      FSDirectory fsdir2 = Mockito.spy(fsdir);
+      cluster.getNamesystem().setFSDirectory(fsdir2);
+      Mockito.doReturn(-1L).when(fsdir2).removeLastINode(any());
+      hdfs.delete(dir, true);
+
+      // the directory must still be snapshottable:
+      // create files and take snapshots
+      DFSTestUtil.createFile(hdfs, file1, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s1");
+      DFSTestUtil.createFile(hdfs, file2, BLOCK_SIZE, (short) 1, 0);
+      hdfs.createSnapshot(dir, "s2");
+
+      // should be able to get a snapshot diff on a descendant of the snapshot root
+      Path dirDir1 = new Path(dir, "dir1");
+      hdfs.mkdirs(dirDir1);
+      hdfs.getSnapshotDiffReport(dirDir1, "s2", "s1");
+      assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
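
To run just the new test (assuming, as the hunk context suggests, that it lives in TestDeleteRace), the standard surefire single-test invocation should work from the hadoop-hdfs module:

    mvn test -Dtest=TestDeleteRace#testDeleteOnSnapshottableDir

Without the FSDirDeleteOp change, the mocked removeLastINode(...) failure makes the delete fail while /dir is still dropped from the snapshottable list, so the subsequent createSnapshot calls and the final getNumSnapshottableDirs() assertion would fail.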