diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 2346ca3ba0a..265260203ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -344,3 +344,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4801. lsSnapshottableDir throws IllegalArgumentException when root is
   snapshottable. (Jing Zhao via szetszwo)
+
+  HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException
+  but not IllegalStateException. (Jing Zhao via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 5f7da53760c..fc34dee1910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -84,13 +84,13 @@ public class SnapshotManager implements SnapshotStats {
       if (s.isAncestorDirectory(dir)) {
         throw new SnapshotException(
             "Nested snapshottable directories not allowed: path=" + path
-            + ", the ancestor " + s.getFullPathName()
+            + ", the subdirectory " + s.getFullPathName()
             + " is already a snapshottable directory.");
       }
       if (dir.isAncestorDirectory(s)) {
         throw new SnapshotException(
             "Nested snapshottable directories not allowed: path=" + path
-            + ", the subdirectory " + s.getFullPathName()
+            + ", the ancestor " + s.getFullPathName()
             + " is already a snapshottable directory.");
       }
     }
@@ -156,6 +156,9 @@ public class SnapshotManager implements SnapshotStats {
     }
 
     if (s == fsdir.getRoot()) {
+      if (s.getSnapshotQuota() == 0) {
+        throw new SnapshotException("Root is not a snapshottable directory");
+      }
       s.setSnapshotQuota(0);
     } else {
       s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index 667221d8574..54e7103f237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -357,7 +357,7 @@ public class Diff<K, E extends Diff.Element<K>> {
     // (A1) All lists are sorted.
     // (A2) All elements in dlist must be in previous.
     // (A3) All elements in clist must be not in tmp = previous - dlist.
-    final List<E> tmp = new ArrayList<E>();
+    final List<E> tmp = new ArrayList<E>(previous.size() - dlist.size());
     {
       // tmp = previous - dlist
       final Iterator<E> i = previous.iterator();
@@ -374,7 +374,7 @@ public class Diff<K, E extends Diff.Element<K>> {
       }
     }
 
-    final List<E> current = new ArrayList<E>();
+    final List<E> current = new ArrayList<E>(tmp.size() + clist.size());
     {
       // current = tmp + clist
       final Iterator<E> tmpIterator = tmp.iterator();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 63c327a4104..6885490aee9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -123,6 +125,13 @@ public class TestNestedSnapshots {
     print("delete snapshot " + rootSnapshot);
     hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
+    try {
+      hdfs.disallowSnapshot(rootPath);
+      fail("Expect snapshot exception when disallowing snapshot on root again");
+    } catch (SnapshotException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Root is not a snapshottable directory", e);
+    }
 
     //change foo to non-snapshottable
     hdfs.deleteSnapshot(foo, s1name);
@@ -134,13 +143,13 @@ public class TestNestedSnapshots {
       hdfs.allowSnapshot(rootPath);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
     }
     try {
       hdfs.allowSnapshot(foo);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
    }
 
     final Path sub1Bar = new Path(bar, "sub1");
@@ -150,13 +159,13 @@ public class TestNestedSnapshots {
       hdfs.allowSnapshot(sub1Bar);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
     }
     try {
       hdfs.allowSnapshot(sub2Bar);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
     }
   }
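For reference, a minimal client-side sketch (not part of the patch) of the behavior the SnapshotManager change enforces: once root has been disallowed, a second disallowSnapshot on "/" should surface a SnapshotException ("Root is not a snapshottable directory") rather than an IllegalStateException. It mirrors the TestNestedSnapshots change above; the class name, main() wrapper, and single-datanode MiniDFSCluster setup are illustrative assumptions, not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotException;

// Illustrative sketch; only the allowSnapshot/disallowSnapshot calls and the
// expected SnapshotException come from the patch and its test.
public class RootSnapshotSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path root = new Path("/");

      hdfs.allowSnapshot(root);     // make root snapshottable
      hdfs.disallowSnapshot(root);  // first disallow succeeds

      try {
        hdfs.disallowSnapshot(root);  // root is no longer snapshottable
        System.err.println("Unexpected: second disallowSnapshot did not fail");
      } catch (SnapshotException e) {
        // Expected after HDFS-4802: "Root is not a snapshottable directory"
        System.out.println("Got expected exception: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}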