HDFS-4978. Merge r1502400 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1502402 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-07-12 00:21:10 +00:00
parent 82ecf50a40
commit a52f448e21
5 changed files with 79 additions and 18 deletions

CHANGES.txt

@@ -985,6 +985,8 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4797. BlockScanInfo does not override equals(..) and hashCode()
     consistently. (szetszwo)
 
+    HDFS-4978. Make disallowSnapshot idempotent. (jing9)
+
 Release 2.0.5-alpha - 06/06/2013
 
   INCOMPATIBLE CHANGES

ClientProtocol.java

@@ -1041,6 +1041,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to be snapped
    * @throws IOException on error
    */
+  @Idempotent
   public void allowSnapshot(String snapshotRoot)
       throws IOException;
@@ -1049,6 +1050,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to disallow snapshot
    * @throws IOException on error
    */
+  @Idempotent
   public void disallowSnapshot(String snapshotRoot)
       throws IOException;
@@ -1067,6 +1069,7 @@ public interface ClientProtocol {
    * @return The difference report represented as a {@link SnapshotDiffReport}.
    * @throws IOException on error
    */
+  @Idempotent
   public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
       String fromSnapshot, String toSnapshot) throws IOException;
 }
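For context: @Idempotent lives in hadoop-common's org.apache.hadoop.io.retry package and tells the client-side retry/failover proxy that a method may be safely re-invoked after a timeout or HA failover, because a repeated call cannot change the outcome. disallowSnapshot only becomes truly idempotent with the SnapshotManager change below. The following is a minimal, self-contained sketch of the general pattern, not Hadoop's actual RetryInvocationHandler; RetrySketch, MAX_ATTEMPTS, and the nested annotation are hypothetical stand-ins.

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

// Illustrative sketch only: mimics the idea behind Hadoop's retry proxies,
// keyed off an @Idempotent-style annotation on the interface method.
public class RetrySketch implements InvocationHandler {

  // Stand-in for org.apache.hadoop.io.retry.Idempotent.
  @Retention(RetentionPolicy.RUNTIME)
  public @interface Idempotent {}

  private static final int MAX_ATTEMPTS = 3;
  private final Object target;

  public RetrySketch(Object target) {
    this.target = target;
  }

  @SuppressWarnings("unchecked")
  public static <T> T wrap(Class<T> iface, T target) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(),
        new Class<?>[] { iface }, new RetrySketch(target));
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args)
      throws Throwable {
    // A method may be blindly re-invoked only if re-running it cannot
    // change the outcome -- exactly what @Idempotent asserts. HDFS-4978
    // makes disallowSnapshot live up to that assertion.
    final int attempts =
        method.isAnnotationPresent(Idempotent.class) ? MAX_ATTEMPTS : 1;
    Throwable last = null;
    for (int i = 0; i < attempts; i++) {
      try {
        return method.invoke(target, args);
      } catch (InvocationTargetException e) {
        last = e.getCause();
      }
    }
    throw last;
  }
}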

SnapshotManager.java

@@ -38,8 +38,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Manage snapshottable directories and their snapshots.
  *
@@ -128,8 +126,7 @@ public class SnapshotManager implements SnapshotStats {
   /** Remove the given snapshottable directory from {@link #snapshottables}. */
   private void removeSnapshottable(INodeDirectorySnapshottable s) {
-    final INodeDirectorySnapshottable removed = snapshottables.remove(s.getId());
-    Preconditions.checkState(s == removed);
+    snapshottables.remove(s.getId());
   }
 
   /** Remove snapshottable directories from {@link #snapshottables} */
@@ -148,17 +145,18 @@ public class SnapshotManager implements SnapshotStats {
    */
   public void resetSnapshottable(final String path) throws IOException {
     final INodesInPath iip = fsdir.getINodesInPath4Write(path);
-    final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
-        iip.getLastINode(), path);
+    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
+    if (!d.isSnapshottable()) {
+      // the directory is already non-snapshottable
+      return;
+    }
+    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
     if (s.getNumSnapshots() > 0) {
       throw new SnapshotException("The directory " + path + " has snapshot(s). "
           + "Please redo the operation after removing all the snapshots.");
     }
     if (s == fsdir.getRoot()) {
-      if (s.getSnapshotQuota() == 0) {
-        throw new SnapshotException("Root is not a snapshottable directory");
-      }
       s.setSnapshotQuota(0);
     } else {
       s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
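From a client's point of view, the rewritten resetSnapshottable() returns early when the directory is already non-snapshottable, so a repeated disallowSnapshot no longer fails inside INodeDirectorySnapshottable.valueOf(). The root is a special case: it always stays snapshottable, so disallowing snapshots there just re-zeros the snapshot quota, and the old quota==0 guard that threw "Root is not a snapshottable directory" is gone. A hedged usage sketch follows; it assumes a reachable Hadoop 2.1+ cluster carrying this patch, and the /backup path and DisallowTwice class are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DisallowTwice {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS NameNode with this patch.
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/backup");  // hypothetical directory
    fs.allowSnapshot(dir);           // enable snapshots
    fs.disallowSnapshot(dir);        // disable them again
    // Before HDFS-4978 this second call threw a SnapshotException from
    // INodeDirectorySnapshottable.valueOf(); with the early return it is
    // now a harmless no-op, so a retried RPC cannot fail spuriously.
    fs.disallowSnapshot(dir);
  }
}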

TestNestedSnapshots.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -41,7 +40,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -132,13 +130,6 @@ public class TestNestedSnapshots {
     print("delete snapshot " + rootSnapshot);
     hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
-    try {
-      hdfs.disallowSnapshot(rootPath);
-      fail("Expect snapshot exception when disallowing snapshot on root again");
-    } catch (SnapshotException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Root is not a snapshottable directory", e);
-    }
 
     //change foo to non-snapshottable
     hdfs.deleteSnapshot(foo, s1name);
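The deleted try/catch asserted the old contract, under which a second disallowSnapshot on the root threw SnapshotException; it would now fail against the patched SnapshotManager. Replacement coverage lives in TestSnapshot#testAllowAndDisallowSnapshot below. If an equivalent check were kept here, it might look like this sketch; the IdempotenceSketch class and its standalone mini-cluster setup are hypothetical, with JUnit 4 assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class IdempotenceSketch {
  @Test
  public void disallowSnapshotOnRootIsIdempotent() throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path root = new Path("/");
      fs.disallowSnapshot(root);
      // Under HDFS-4978 a second call must be a silent no-op; previously it
      // threw SnapshotException("Root is not a snapshottable directory").
      fs.disallowSnapshot(root);
    } finally {
      cluster.shutdown();
    }
  }
}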

TestSnapshot.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
@@ -375,6 +377,71 @@ public class TestSnapshot {
     }
   }
 
+  /**
+   * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
+   * they are idempotent.
+   */
+  @Test
+  public void testAllowAndDisallowSnapshot() throws Exception {
+    final Path dir = new Path("/dir");
+    final Path file0 = new Path(dir, "file0");
+    final Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+
+    // disallowSnapshot on dir
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+    // do it again
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+
+    // same process on root
+    final Path root = new Path("/");
+    INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
+        .asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    // root is snapshottable dir, but with 0 snapshot quota
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+
+    // disallowSnapshot on root: root stays snapshottable, quota drops to 0
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // do it again
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+  }
+
   /**
    * Prepare a list of modifications. A modification may be a file creation,
    * file deletion, or a modification operation such as appending to an existing