HDFS-15316. Deletion failure should not remove directory from snapshottables. Contributed by hemanthboyina

(cherry picked from commit 743c2e9071)
This commit is contained in:
Surendra Singh Lilhore 2020-05-13 15:01:07 +05:30 committed by Wei-Chiu Chuang
parent 14fddba715
commit 02d37c7d22
2 changed files with 48 additions and 2 deletions

View File

@ -61,10 +61,10 @@ class FSDirDeleteOp {
removedUCFiles);
if (unprotectedDelete(fsd, iip, context, mtime)) {
filesRemoved = context.quotaDelta().getNsDelta();
fsn.removeSnapshottableDirs(snapshottableDirs);
}
fsd.updateReplicationFactor(context.collectedBlocks()
.toUpdateReplicationInfo());
fsn.removeSnapshottableDirs(snapshottableDirs);
fsd.updateCount(iip, context.quotaDelta(), false);
}
} finally {
@ -144,9 +144,9 @@ class FSDirDeleteOp {
new ReclaimContext(fsd.getBlockStoragePolicySuite(),
collectedBlocks, removedINodes, removedUCFiles),
mtime);
fsn.removeSnapshottableDirs(snapshottableDirs);
if (filesRemoved) {
fsn.removeSnapshottableDirs(snapshottableDirs);
fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false);
fsn.getBlockManager().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}

View File

@ -66,6 +66,8 @@ import org.mockito.Mockito;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.junit.Assert.assertNotEquals;
/**
@ -492,4 +494,48 @@ public class TestDeleteRace {
}
}
}
/**
 * HDFS-15316: a delete that fails must NOT remove the directory from the
 * set of snapshottable directories. After the failed delete, snapshots and
 * snapshot diffs on the directory (and its descendants) must still work.
 */
@Test
public void testDeleteOnSnapshottableDir() throws Exception {
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
      true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory realFsDir = fsn.getFSDirectory();

    Path snapRoot = new Path("/dir");
    dfs.mkdirs(snapRoot);
    dfs.allowSnapshot(snapRoot);

    // Force the delete to fail: the spy makes removeLastINode() report
    // failure (-1), so the directory must survive the delete call.
    FSDirectory spyFsDir = Mockito.spy(realFsDir);
    cluster.getNamesystem().setFSDirectory(spyFsDir);
    Mockito.doReturn(-1L).when(spyFsDir).removeLastINode(any());
    dfs.delete(snapRoot, true);

    // The directory must still be snapshottable: create a file and a
    // snapshot, twice, so there is something to diff between s1 and s2.
    Path file1 = new Path(snapRoot, "file1");
    Path file2 = new Path(snapRoot, "file2");
    DFSTestUtil.createFile(dfs, file1, BLOCK_SIZE, (short) 1, 0);
    dfs.createSnapshot(snapRoot, "s1");
    DFSTestUtil.createFile(dfs, file2, BLOCK_SIZE, (short) 1, 0);
    dfs.createSnapshot(snapRoot, "s2");

    // Snapshot diff taken on a descendant of the snapshot root must work
    // (enabled by DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT).
    Path descendant = new Path(snapRoot, "dir1");
    dfs.mkdirs(descendant);
    dfs.getSnapshotDiffReport(descendant, "s2", "s1");

    // The snapshottable dir survived the failed delete.
    assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}