diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 13f109201be..21ee3ceb0a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -57,7 +57,7 @@ static long delete(FSDirectory fsd, INodesInPath iip,
     try {
       if (deleteAllowed(iip, iip.getPath()) ) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
-        FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+        FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
             fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes,
             removedUCFiles);
@@ -140,7 +140,7 @@ static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
-    FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+    FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
     boolean filesRemoved = unprotectedDelete(fsd, iip,
         new ReclaimContext(fsd.getBlockStoragePolicySuite(),
             collectedBlocks, removedINodes, removedUCFiles),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 0fdc545cfcb..911b178ca5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -156,7 +156,7 @@ static INodesInPath unprotectedRenameTo(FSDirectory fsd,
     assert fsd.hasWriteLock();
     final INode srcInode = srcIIP.getLastINode();
     try {
-      validateRenameSource(srcIIP);
+      validateRenameSource(fsd, srcIIP);
     } catch (SnapshotException e) {
       throw e;
     } catch (IOException ignored) {
@@ -365,7 +365,7 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,
     final String dst = dstIIP.getPath();
     final String error;
     final INode srcInode = srcIIP.getLastINode();
-    validateRenameSource(srcIIP);
+    validateRenameSource(fsd, srcIIP);
 
     // validate the destination
     if (dst.equals(src)) {
@@ -387,7 +387,7 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
     if (dstInode != null) { // Destination exists
       validateOverwrite(src, dst, overwrite, srcInode, dstInode);
-      FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
+      FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
     }
 
     INode dstParent = dstIIP.getINode(-2);
@@ -559,8 +559,8 @@ private static void validateOverwrite(
     }
   }
 
-  private static void validateRenameSource(INodesInPath srcIIP)
-      throws IOException {
+  private static void validateRenameSource(FSDirectory fsd,
+      INodesInPath srcIIP) throws IOException {
     String error;
     final INode srcInode = srcIIP.getLastINode();
     // validate source
@@ -578,7 +578,7 @@ private static void validateRenameSource(INodesInPath srcIIP)
     }
     // srcInode and its subtree cannot contain snapshottable directories with
     // snapshots
-    FSDirSnapshotOp.checkSnapshot(srcInode, null);
+    FSDirSnapshotOp.checkSnapshot(fsd, srcIIP, null);
   }
 
   private static class RenameOperation {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index c565a6aac00..ad282d164d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -35,7 +35,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.ListIterator;
 import java.util.List;
 
 class FSDirSnapshotOp {
@@ -252,7 +251,7 @@ private static void checkSubtreeReadPermission(
    * @param snapshottableDirs The list of directories that are snapshottable
    *                          but do not have snapshots yet
    */
-  static void checkSnapshot(
+  private static void checkSnapshot(
       INode target, List<INodeDirectory> snapshottableDirs)
       throws SnapshotException {
     if (target.isDirectory()) {
@@ -276,4 +275,23 @@ static void checkSnapshot(
       }
     }
   }
+
+  /**
+   * Check if the given path (or one of its descendants) is snapshottable and
+   * already has snapshots.
+   *
+   * @param fsd the FSDirectory
+   * @param iip inodes of the path
+   * @param snapshottableDirs The list of directories that are snapshottable
+   *                          but do not have snapshots yet
+   */
+  static void checkSnapshot(FSDirectory fsd, INodesInPath iip,
+      List<INodeDirectory> snapshottableDirs) throws SnapshotException {
+    // avoid the performance penalty of recursing the tree if snapshots
+    // are not in use
+    SnapshotManager sm = fsd.getFSNamesystem().getSnapshotManager();
+    if (sm.getNumSnapshottableDirs() > 0) {
+      checkSnapshot(iip.getLastINode(), snapshottableDirs);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 3a318bc7b91..07f01d00ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,12 +31,15 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /** Test snapshot related operations. */
 public class TestSnapshotPathINodes {
@@ -426,4 +430,16 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
     hdfs.deleteSnapshot(sub1, "s3");
     hdfs.disallowSnapshot(sub1);
   }
+
+  @Test
+  public void testShortCircuitSnapshotSearch() throws SnapshotException {
+    FSNamesystem fsn = cluster.getNamesystem();
+    SnapshotManager sm = fsn.getSnapshotManager();
+    assertEquals(0, sm.getNumSnapshottableDirs());
+
+    INodesInPath iip = Mockito.mock(INodesInPath.class);
+    List<INodeDirectory> snapDirs = new ArrayList<>();
+    FSDirSnapshotOp.checkSnapshot(fsn.getFSDirectory(), iip, snapDirs);
+    Mockito.verifyZeroInteractions(iip);
+  }
 }
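Note: the sketch below is a minimal, self-contained illustration of the
short-circuit pattern this patch introduces, namely consulting a cheap O(1)
counter before paying for a recursive subtree walk. The Node type, the
SnapshotCheckSketch class, and the recurse() helper are hypothetical
stand-ins for illustration, not Hadoop's actual API, and the walk is only a
plausible shape of what the private checkSnapshot(INode, List) overload does.

import java.util.List;

class SnapshotCheckSketch {
  /** Hypothetical stand-in for an inode in the namespace tree. */
  interface Node {
    boolean isSnapshottable();
    boolean hasSnapshots();
    List<Node> children();
  }

  /** Stand-in for SnapshotManager#getNumSnapshottableDirs(): O(1) to read. */
  private int numSnapshottableDirs;

  /** Mirrors the shape of the new checkSnapshot(fsd, iip, ...) overload. */
  void checkSnapshot(Node target, List<Node> snapshottableDirs) {
    // Short circuit: if no directory in the namespace is snapshottable,
    // the recursive scan below cannot find anything, so skip it entirely.
    if (numSnapshottableDirs == 0) {
      return;
    }
    recurse(target, snapshottableDirs);
  }

  private void recurse(Node node, List<Node> snapshottableDirs) {
    if (node.isSnapshottable()) {
      if (node.hasSnapshots()) {
        // Deleting or renaming over a subtree with live snapshots is illegal.
        throw new IllegalStateException(node + " has snapshots");
      } else if (snapshottableDirs != null) {
        // Snapshottable but snapshot-free: collected so the caller can
        // clean up the snapshottable-directory list after the delete.
        snapshottableDirs.add(node);
      }
    }
    for (Node child : node.children()) {
      recurse(child, snapshottableDirs);
    }
  }
}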