HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException but not IllegalStateException. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1480015 13f79535-47bb-0310-9956-ffa450edef68
parent 393188c445
commit 41312abe84
@@ -344,3 +344,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4801. lsSnapshottableDir throws IllegalArgumentException when root is
   snapshottable. (Jing Zhao via szetszwo)
 
+  HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException
+  but not IllegalStateException. (Jing Zhao via szetszwo)
+
@@ -84,13 +84,13 @@ public class SnapshotManager implements SnapshotStats {
       if (s.isAncestorDirectory(dir)) {
         throw new SnapshotException(
             "Nested snapshottable directories not allowed: path=" + path
-            + ", the ancestor " + s.getFullPathName()
+            + ", the subdirectory " + s.getFullPathName()
             + " is already a snapshottable directory.");
       }
       if (dir.isAncestorDirectory(s)) {
         throw new SnapshotException(
             "Nested snapshottable directories not allowed: path=" + path
-            + ", the subdirectory " + s.getFullPathName()
+            + ", the ancestor " + s.getFullPathName()
             + " is already a snapshottable directory.");
       }
     }
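
For context on why the two labels are swapped: taken together with the corrected messages here and the assertNestedSnapshotException changes in the test further down, s.isAncestorDirectory(dir) evidently holds when dir is an ancestor of the snapshottable directory s, so s is the subdirectory under the path being made snapshottable, while the reverse check makes s the ancestor. A minimal self-contained sketch of that logic (the Dir class and every name below are illustrative stand-ins, not HDFS code):

import java.util.Arrays;
import java.util.List;

/** Illustrative stand-in for the nested-snapshottable check above; not HDFS code. */
public class NestedCheckSketch {

  /** Minimal directory node with only a name and a parent pointer. */
  static class Dir {
    final String name;
    final Dir parent;
    Dir(String name, Dir parent) { this.name = name; this.parent = parent; }

    /** Assumed semantics: true iff {@code other} is an ancestor of this directory. */
    boolean isAncestorDirectory(Dir other) {
      for (Dir p = parent; p != null; p = p.parent) {
        if (p == other) {
          return true;
        }
      }
      return false;
    }

    String getFullPathName() {
      if (parent == null) {
        return "/";
      }
      String prefix = parent.getFullPathName();
      return ("/".equals(prefix) ? "" : prefix) + "/" + name;
    }
  }

  /** Returns the message the check would produce, or null if no nesting conflict. */
  static String checkNested(List<Dir> snapshottables, Dir dir, String path) {
    for (Dir s : snapshottables) {
      if (s.isAncestorDirectory(dir)) {       // s lies below dir => s is the subdirectory
        return "Nested snapshottable directories not allowed: path=" + path
            + ", the subdirectory " + s.getFullPathName()
            + " is already a snapshottable directory.";
      }
      if (dir.isAncestorDirectory(s)) {       // s lies above dir => s is the ancestor
        return "Nested snapshottable directories not allowed: path=" + path
            + ", the ancestor " + s.getFullPathName()
            + " is already a snapshottable directory.";
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Dir root = new Dir("", null);
    Dir foo = new Dir("foo", root);
    Dir bar = new Dir("bar", foo);
    List<Dir> snapshottables = Arrays.asList(foo);   // /foo is already snapshottable

    // Allowing snapshots on / names /foo as the subdirectory ...
    System.out.println(checkNested(snapshottables, root, "/"));
    // ... and allowing them on /foo/bar names /foo as the ancestor.
    System.out.println(checkNested(snapshottables, bar, "/foo/bar"));
  }
}

Running the sketch prints the "subdirectory" message for the root case and the "ancestor" message for the descendant case, matching the expectations asserted in TestNestedSnapshots below.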
@@ -156,6 +156,9 @@ public class SnapshotManager implements SnapshotStats {
     }
 
     if (s == fsdir.getRoot()) {
+      if (s.getSnapshotQuota() == 0) {
+        throw new SnapshotException("Root is not a snapshottable directory");
+      }
       s.setSnapshotQuota(0);
     } else {
       s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
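
This hunk is the user-visible fix: the root keeps its quota-based bookkeeping rather than being replaced (per the surviving else branch), and a snapshot quota that is already 0 now raises a SnapshotException instead of the IllegalStateException named in the commit title. A minimal client-side sketch of the resulting behavior, assuming an in-process MiniDFSCluster and the Path-based allowSnapshot/disallowSnapshot client calls used by the test added further down:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotException;

/** Sketch only; the class name and setup are illustrative, not part of this commit. */
public class DisallowRootTwiceSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path root = new Path("/");

      hdfs.allowSnapshot(root);     // root becomes snapshottable
      hdfs.disallowSnapshot(root);  // first call succeeds; root's snapshot quota drops to 0

      try {
        hdfs.disallowSnapshot(root);  // second call: quota is already 0
      } catch (SnapshotException e) {
        // After this patch: "Root is not a snapshottable directory"
        System.out.println("Got expected exception: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}

The same sequence is what the new try/catch in TestNestedSnapshots below asserts via GenericTestUtils.assertExceptionContains.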
@@ -357,7 +357,7 @@ public class Diff<K, E extends Diff.Element<K>> {
     // (A1) All lists are sorted.
     // (A2) All elements in dlist must be in previous.
     // (A3) All elements in clist must be not in tmp = previous - dlist.
-    final List<E> tmp = new ArrayList<E>();
+    final List<E> tmp = new ArrayList<E>(previous.size() - dlist.size());
     {
       // tmp = previous - dlist
       final Iterator<E> i = previous.iterator();
@@ -374,7 +374,7 @@ public class Diff<K, E extends Diff.Element<K>> {
       }
     }
 
-    final List<E> current = new ArrayList<E>();
+    final List<E> current = new ArrayList<E>(tmp.size() + clist.size());
     {
       // current = tmp + clist
       final Iterator<E> tmpIterator = tmp.iterator();
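
Both Diff.java hunks are the same micro-optimization: given the stated assumptions (A2) and (A3), the result sizes are known before the lists are filled (previous minus dlist elements, then tmp plus clist elements), so the ArrayLists can be created at their final capacity and skip the intermediate array-growth copies. A small generic illustration of the idea (plain JDK code, not taken from Diff.java):

import java.util.ArrayList;
import java.util.List;

public class PresizeSketch {
  public static void main(String[] args) {
    int previousSize = 100_000;
    int deletedSize = 10_000;

    // Default capacity: the backing array is copied to a larger one
    // several times as elements are appended.
    List<Integer> grown = new ArrayList<>();

    // Pre-sized: a single allocation of exactly the needed capacity,
    // mirroring new ArrayList<E>(previous.size() - dlist.size()) above.
    List<Integer> presized = new ArrayList<>(previousSize - deletedSize);

    for (int i = 0; i < previousSize - deletedSize; i++) {
      grown.add(i);
      presized.add(i);
    }
    // Both lists hold the same elements; only the allocation pattern differs.
    System.out.println(grown.size() + " " + presized.size());
  }
}

Pre-sizing only changes allocation behavior; an ArrayList still grows automatically if the estimate is ever exceeded, so the change is safe even where the size hints are only approximate.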
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -123,6 +125,13 @@ public class TestNestedSnapshots {
     print("delete snapshot " + rootSnapshot);
     hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
+    try {
+      hdfs.disallowSnapshot(rootPath);
+      fail("Expect snapshot exception when disallowing snapshot on root again");
+    } catch (SnapshotException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Root is not a snapshottable directory", e);
+    }
 
     //change foo to non-snapshottable
     hdfs.deleteSnapshot(foo, s1name);
@@ -134,13 +143,13 @@ public class TestNestedSnapshots {
       hdfs.allowSnapshot(rootPath);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
     }
     try {
       hdfs.allowSnapshot(foo);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "ancestor");
+      assertNestedSnapshotException(se, "subdirectory");
     }
 
     final Path sub1Bar = new Path(bar, "sub1");
@@ -150,13 +159,13 @@ public class TestNestedSnapshots {
       hdfs.allowSnapshot(sub1Bar);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
    }
     try {
       hdfs.allowSnapshot(sub2Bar);
       Assert.fail();
     } catch(SnapshotException se) {
-      assertNestedSnapshotException(se, "subdirectory");
+      assertNestedSnapshotException(se, "ancestor");
     }
   }
 