HDFS-4520. Support listing snapshots under a snapshottable directory using ls. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1449862 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-02-25 19:49:05 +00:00
parent 0deff1727e
commit 5ddbbd9d65
3 changed files with 61 additions and 0 deletions


@@ -170,3 +170,6 @@ Branch-2802 Snapshot (Unreleased)
  HDFS-4524. Update SnapshotManager#snapshottables when loading fsimage.
  (Jing Zhao via szetszwo)
+  HDFS-4520. Support listing snapshots under a snapshottable directory using ls.
+  (Jing Zhao via szetszwo)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.util.ByteArray;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -1228,6 +1229,9 @@ public class FSDirectory implements Closeable {
    readLock();
    try {
+      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+        return getSnapshotsListing(srcs, startAfter);
+      }
      final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
      final Snapshot snapshot = inodesInPath.getPathSnapshot();
      final INode targetNode = inodesInPath.getINode(0);
@@ -1258,6 +1262,35 @@
    }
  }
+  /**
+   * Get a listing of all the snapshots of a snapshottable directory
+   */
+  private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
+      throws UnresolvedLinkException, IOException {
+    assert hasReadLock();
+    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
+    Preconditions.checkArgument(src.endsWith(dotSnapshot),
+        src + " does not end with " + dotSnapshot);
+    final String dirPath = normalizePath(src.substring(0,
+        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
+    final INode node = this.getINode(dirPath);
+    final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
+        .valueOf(node, dirPath);
+    final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
+    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
+    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
+    int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
+    final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
+    for (int i = 0; i < numOfListing; i++) {
+      Root sRoot = snapshots.get(i + skipSize).getRoot();
+      listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
+    }
+    return new DirectoryListing(
+        listing, snapshots.size() - skipSize - numOfListing);
+  }
  /** Get the file info for a specific file.
   * @param src The string representation of the path to the file
   * @param resolveLink whether to throw UnresolvedLinkException
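
For context (not part of this patch): a client reaches getSnapshotsListing() simply by listing the reserved ".snapshot" path of a snapshottable directory, e.g. "hdfs dfs -ls /data/.snapshot". The skipSize handling above follows the usual binarySearch contract: a negative return value encodes the insertion point as -(insertionPoint) - 1, so -skipSize - 1 resumes at the insertion point, while a non-negative hit resumes one entry past the snapshot named by startAfter. A minimal client-side sketch, assuming a hypothetical directory /data that has already been made snapshottable and has at least one snapshot:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    // Connect to the default file system configured in core-site.xml.
    FileSystem fs = FileSystem.get(new Configuration());
    // Equivalent to "hdfs dfs -ls /data/.snapshot": each entry is the root
    // directory of one snapshot, produced by getSnapshotsListing() above.
    for (FileStatus st : fs.listStatus(new Path("/data/.snapshot"))) {
      System.out.println(st.getPath());
    }
  }
}
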
@@ -1269,6 +1302,9 @@
    String srcs = normalizePath(src);
    readLock();
    try {
+      if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+        return getFileInfo4DotSnapshot(srcs);
+      }
      final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
      final INode i = inodesInPath.getINode(0);
      return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i,
@@ -1278,6 +1314,23 @@
    }
  }
+  private HdfsFileStatus getFileInfo4DotSnapshot(String src)
+      throws UnresolvedLinkException {
+    final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
+    Preconditions.checkArgument(src.endsWith(dotSnapshot),
+        src + " does not end with " + dotSnapshot);
+    final String dirPath = normalizePath(src.substring(0,
+        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
+    final INode node = this.getINode(dirPath);
+    if (node instanceof INodeDirectorySnapshottable) {
+      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+          HdfsFileStatus.EMPTY_NAME, -1L);
+    }
+    return null;
+  }
  /**
   * Get the blocks associated with the file.
   */
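
Again for context (not part of this patch): getFileInfo4DotSnapshot() lets a plain stat of the ".snapshot" path succeed even though no real INode backs it, by answering with a synthetic, empty directory status for a snapshottable directory and null otherwise. A minimal sketch of the client-visible effect, reusing the fs handle and the hypothetical /data directory from the sketch above:

// Succeeds and reports a directory even though ".snapshot" is not a real
// INode; for a non-snapshottable directory the same call fails with
// FileNotFoundException because getFileInfo4DotSnapshot() returns null.
FileStatus dotSnapshot = fs.getFileStatus(new Path("/data/.snapshot"));
System.out.println(dotSnapshot.isDirectory());
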

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java

@@ -197,6 +197,11 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
    return i < 0? null: snapshotsByNames.get(i);
  }
+  /** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
+  public ReadOnlyList<Snapshot> getSnapshotList() {
+    return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
+  }
  /**
   * Rename a snapshot
   * @param path
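
One closing note (not part of this patch): getSnapshotList() exposes the internal snapshotsByNames list as a read-only view, and getSnapshotsListing() relies on that list staying sorted by snapshot name so that ReadOnlyList.Util.binarySearch can locate the startAfter position. A rough plain-JDK analogy, illustrative only and not HDFS code:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ReadOnlyViewExample {
  public static void main(String[] args) {
    List<String> snapshotsByNames = new ArrayList<String>();
    Collections.addAll(snapshotsByNames, "s0", "s1", "s2"); // kept sorted by name
    // Read-only view over the live, sorted backing list.
    List<String> view = Collections.unmodifiableList(snapshotsByNames);
    System.out.println(Collections.binarySearch(view, "s1")); // prints 1
    // view.add("s3"); // would throw UnsupportedOperationException
  }
}
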