HDFS-4330. Support snapshots up to the snapshot limit.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1426432 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-12-28 08:49:33 +00:00
parent b9f965de12
commit c82961be0f
6 changed files with 84 additions and 41 deletions


@@ -87,3 +87,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4317. Change INode and its subclasses to support HDFS-4103. (szetszwo)
 
   HDFS-4103. Support O(1) snapshot creation. (szetszwo)
+
+  HDFS-4330. Support snapshots up to the snapshot limit. (szetszwo)


@@ -2022,7 +2022,7 @@ public class FSDirectory implements Closeable {
       }
     } else {
       // a non-quota directory; so replace it with a directory with quota
-      return dirNode.replaceSelf4Quota(latest, oldNsQuota, oldDsQuota);
+      return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota);
     }
     return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
   }
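The one-line change above fixes which quota values are applied when a plain directory is first converted into a quota directory: the replacement node must carry the newly requested nsQuota/dsQuota, not the old (effectively unset) values read back from the directory. A minimal standalone sketch of the idea, using hypothetical SimpleDir/QuotaDir classes rather than the real HDFS inode types:

// Hypothetical illustration only; these are not the HDFS classes.
class SimpleDir {
  // A directory without quota tracking.
  QuotaDir withQuota(long nsQuota, long dsQuota) {
    // The replacement must be built from the requested values, not from the
    // pre-existing (unset) quota of this plain directory, which is what
    // passing oldNsQuota/oldDsQuota effectively did.
    return new QuotaDir(nsQuota, dsQuota);
  }
}

class QuotaDir extends SimpleDir {
  final long nsQuota;   // namespace (file/directory count) limit
  final long dsQuota;   // diskspace limit
  QuotaDir(long nsQuota, long dsQuota) {
    this.nsQuota = nsQuota;
    this.dsQuota = dsQuota;
  }
}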


@@ -31,7 +31,11 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
-/** The directory with snapshots. */
+/**
+ * The directory with snapshots. It maintains a list of snapshot diffs for
+ * storing snapshot data. When there are modifications to the directory, the old
+ * data is stored in the latest snapshot, if there is any.
+ */
 public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
   /**
    * The difference between the current state and a previous snapshot
@@ -351,8 +355,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
     /** Compare diffs with snapshot ID. */
     @Override
-    public int compareTo(final Snapshot that_snapshot) {
-      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that_snapshot);
+    public int compareTo(final Snapshot that) {
+      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that);
     }
 
     /** Is the inode the root of the snapshot? */
@@ -381,9 +385,13 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
     private INodeDirectory getSnapshotINode() {
       // get from this diff, then the posterior diff and then the current inode
-      return snapshotINode != null? snapshotINode
-          : posteriorDiff != null? posteriorDiff.getSnapshotINode()
-          : INodeDirectoryWithSnapshot.this;
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        if (d.snapshotINode != null) {
+          return d.snapshotINode;
+        } else if (d.posteriorDiff == null) {
+          return INodeDirectoryWithSnapshot.this;
+        }
+      }
     }
 
     /**
@@ -429,17 +437,18 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
     /** @return the child with the given name. */
     INode getChild(byte[] name, boolean checkPosterior) {
-      final INode[] array = diff.accessPrevious(name);
-      if (array != null) {
-        // this diff is able to find it
-        return array[0];
-      } else if (!checkPosterior) {
-        // Since checkPosterior is false, return null, i.e. not found.
-        return null;
-      } else {
-        // return the posterior INode.
-        return posteriorDiff != null? posteriorDiff.getChild(name, true)
-            : INodeDirectoryWithSnapshot.this.getChild(name, null);
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        final INode[] array = d.diff.accessPrevious(name);
+        if (array != null) {
+          // the diff is able to find it
+          return array[0];
+        } else if (!checkPosterior) {
+          // Since checkPosterior is false, return null, i.e. not found.
+          return null;
+        } else if (d.posteriorDiff == null) {
+          // no more posterior diff, get from current inode.
+          return INodeDirectoryWithSnapshot.this.getChild(name, null);
+        }
       }
     }
 
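Both getSnapshotINode() and getChild(..) above are rewritten from recursing through posteriorDiff into a single loop over the diff chain. With up to the snapshot limit of diffs per directory, the recursive form grows the call stack linearly in the number of snapshots, while the loop keeps the lookup in constant stack space. A standalone sketch of the same recursion-to-loop rewrite on a toy linked chain (the Chain/Node/find names are illustrative, not from the patch):

// Illustrative only: the transformation applied in the two methods above.
class Chain {
  static class Node {
    String value;     // non-null when this node holds an answer
    Node posterior;   // next element in the chain, or null at the end

    // Recursive form: one stack frame per node visited.
    String findRecursive(String fallback) {
      if (value != null) {
        return value;
      }
      return posterior != null ? posterior.findRecursive(fallback) : fallback;
    }

    // Iterative form: same result, constant stack space regardless of length.
    String findIterative(String fallback) {
      for (Node d = this; ; d = d.posterior) {
        if (d.value != null) {
          return d.value;
        } else if (d.posterior == null) {
          return fallback;
        }
      }
    }
  }
}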
@@ -554,6 +563,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       return save2Snapshot(latest, null);
     }
 
+    /** Save the snapshot copy to the latest snapshot. */
     public Pair<INodeDirectory, INodeDirectory> save2Snapshot(Snapshot latest,
         INodeDirectory snapshotCopy) {
       return latest == null? null


@@ -51,7 +51,6 @@ public class TestFSDirectory {
   private final Path sub11 = new Path(sub1, "sub11");
   private final Path file3 = new Path(sub11, "file3");
 
-  private final Path file4 = new Path(sub1, "z_file4");
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
@@ -106,29 +105,13 @@ public class TestFSDirectory {
 
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
-      if (!line.contains("snapshot")) {
-        Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+      if (!line.isEmpty() && !line.contains("snapshot")) {
+        Assert.assertTrue("line=" + line,
+            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
             || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
         checkClassName(line);
       }
     }
-
-    LOG.info("Create a new file " + file4);
-    DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
-
-    final StringBuffer b2 = root.dumpTreeRecursively();
-    System.out.println("b2=" + b2);
-
-    int i = 0;
-    int j = b1.length() - 1;
-    for(; b1.charAt(i) == b2.charAt(i); i++);
-    int k = b2.length() - 1;
-    for(; b1.charAt(j) == b2.charAt(k); j--, k--);
-    final String diff = b2.substring(i, k + 1);
-    System.out.println("i=" + i + ", j=" + j + ", k=" + k);
-    System.out.println("diff=" + diff);
-    Assert.assertTrue(i > j);
-    Assert.assertTrue(diff.contains(file4.getName()));
   }
 
   @Test


@@ -26,6 +26,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +40,8 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
  * Helper for writing snapshot related tests
  */
 public class SnapshotTestHelper {
+  public static final Log LOG = LogFactory.getLog(SnapshotTestHelper.class);
+
   private SnapshotTestHelper() {
     // Cannot be instantinatied
   }


@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -56,6 +60,8 @@ public class TestNestedSnapshots {
   }
 
   private static final long SEED = 0;
+  private static Random RANDOM = new Random(SEED);
+
   private static final short REPLICATION = 3;
   private static final long BLOCKSIZE = 1024;
 
@@ -89,7 +95,7 @@ public class TestNestedSnapshots {
    */
   @Test
   public void testNestedSnapshots() throws Exception {
-    final Path foo = new Path("/test/foo");
+    final Path foo = new Path("/testNestedSnapshots/foo");
     final Path bar = new Path(foo, "bar");
     final Path file1 = new Path(bar, "file1");
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
@@ -117,8 +123,8 @@ public class TestNestedSnapshots {
     assertFile(s1path, s2path, file2, true, false, false);
   }
 
-  private static void print(String mess) throws UnresolvedLinkException {
-    System.out.println("XXX " + mess);
+  private static void print(String message) throws UnresolvedLinkException {
+    System.out.println("XXX " + message);
     SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
   }
 
@@ -135,4 +141,42 @@ public class TestNestedSnapshots {
       Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
     }
   }
+
+  @Test
+  public void testSnapshotLimit() throws Exception {
+    final int step = 1000;
+    final String dirStr = "/testSnapshotLimit/dir";
+    final Path dir = new Path(dirStr);
+    hdfs.mkdirs(dir, new FsPermission((short)0777));
+    hdfs.allowSnapshot(dirStr);
+
+    int s = 0;
+    for(; s < SNAPSHOT_LIMIT; s++) {
+      final String snapshotName = "s" + s;
+      hdfs.createSnapshot(snapshotName, dirStr);
+
+      //create a file occasionally
+      if (s % step == 0) {
+        final Path file = new Path(dirStr, "f" + s);
+        DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+      }
+    }
+
+    try {
+      hdfs.createSnapshot("s" + s, dirStr);
+      Assert.fail("Expected to fail to create snapshot, but didn't.");
+    } catch(IOException ioe) {
+      SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
+    }
+
+    for(int f = 0; f < SNAPSHOT_LIMIT; f += step) {
+      final String file = "f" + f;
+      s = RANDOM.nextInt(step);
+      for(; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
+        final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
+        //the file #f exists in snapshot #s iff s > f.
+        Assert.assertEquals(s > f, hdfs.exists(p));
+      }
+    }
+  }
 }
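The new test drives createSnapshot right up to SNAPSHOT_LIMIT, a constant statically imported from INodeDirectorySnapshottable, and expects the very next attempt to fail with an IOException. The sketch below shows, in self-contained form, the kind of per-directory limit check that the test's try/catch exercises; the class name, field names, and the limit value used here are illustrative assumptions, not the actual INodeDirectorySnapshottable code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch of a per-directory snapshot limit check (assumed names).
class SnapshottableDir {
  // The real limit lives in INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
  // the value used here is an assumption for the sketch.
  static final int SNAPSHOT_LIMIT = 1 << 16;

  private final List<String> snapshots = new ArrayList<String>();

  void addSnapshot(String name) throws IOException {
    if (snapshots.size() + 1 > SNAPSHOT_LIMIT) {
      // This failure is what the test expects on the (LIMIT+1)-th call.
      throw new IOException("Failed to add snapshot: there are already "
          + snapshots.size() + " snapshot(s) and the limit is "
          + SNAPSHOT_LIMIT + ".");
    }
    snapshots.add(name);
  }
}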