HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for selecting particular snapshot children list views.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1408923 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 40fe1ffbaa
commit 099762a0bc
CHANGES.HDFS-2802.txt
@@ -64,3 +64,6 @@ Branch-2802 Snapshot (Unreleased)
   and has snapshots. (Jing Zhao via szetszwo)
 
   HDFS-4170. Add snapshot information to INodesInPath. (szetszwo)
+
+  HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for
+  selecting particular snapshot children list views. (szetszwo)
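
The heart of the change is the new signature INodeDirectory.getChildrenList(Snapshot): a null argument selects the current children, while a non-null Snapshot is meant to select that snapshot's view (the selection itself is still a TODO in this commit; see the INodeDirectory hunk below). A minimal sketch of the intended usage, assuming the branch's classes are on the classpath; the helper class and method names are illustrative only:

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

class ChildrenListSketch {
  // Pass null for the current children, or a Snapshot for that snapshot's view.
  static int countChildren(INodeDirectory dir, Snapshot s) {
    final ReadOnlyList<INode> children = dir.getChildrenList(s);
    return children.size();  // the accessor never returns null
  }
}
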

FSDirectory.java
@@ -23,7 +23,6 @@ import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -60,7 +59,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ByteArray;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
 
@@ -696,14 +697,15 @@ public class FSDirectory implements Closeable {
               + error);
           throw new FileAlreadyExistsException(error);
         }
-        List<INode> children = dstInode.isDirectory() ?
-            ((INodeDirectory) dstInode).getChildren() : null;
-        if (children != null && children.size() != 0) {
-          error = "rename cannot overwrite non empty destination directory "
-              + dst;
-          NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-              + error);
-          throw new IOException(error);
+        if (dstInode.isDirectory()) {
+          final ReadOnlyList<INode> children = ((INodeDirectory) dstInode
+              ).getChildrenList(dstInodesInPath.getPathSnapshot());
+          if (!children.isEmpty()) {
+            error = "rename destination directory is not empty: " + dst;
+            NameNode.stateChangeLog.warn(
+                "DIR* FSDirectory.unprotectedRenameTo: " + error);
+            throw new IOException(error);
+          }
         }
         INode snapshotNode = hasSnapshot(dstInode);
         if (snapshotNode != null) {
@@ -1072,12 +1074,14 @@ public class FSDirectory implements Closeable {
   boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
     readLock();
     try {
-      final INode inode = rootDir.getNode(path, false);
+      final INodesInPath inodesInPath = rootDir.getINodesInPath(path, false);
+      final INode inode = inodesInPath.getINode(0);
       if (inode == null || !inode.isDirectory()) {
         //not found or not a directory
         return false;
       }
-      return ((INodeDirectory)inode).getChildrenList().size() != 0;
+      final Snapshot s = inodesInPath.getPathSnapshot();
+      return !((INodeDirectory)inode).getChildrenList(s).isEmpty();
     } finally {
       readUnlock();
     }
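
With the path's snapshot threaded through, the same directory can answer the emptiness question differently for its live and snapshot views once snapshot selection is implemented. A hypothetical illustration (paths and snapshot name "s0" are assumptions, not from this patch; the sketch lives in the namenode package because isNonEmptyDirectory is package-private):

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.UnresolvedLinkException;

class EmptinessSketch {
  // Compares the current view against an assumed ".snapshot" path view.
  static void compareViews(FSDirectory fsdir) throws UnresolvedLinkException {
    boolean liveNonEmpty = fsdir.isNonEmptyDirectory("/user/alice");
    boolean snapNonEmpty = fsdir.isNonEmptyDirectory("/user/.snapshot/s0/alice");
    System.out.println(liveNonEmpty + " now, " + snapNonEmpty + " in s0");
  }
}
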
@@ -1155,13 +1159,10 @@ public class FSDirectory implements Closeable {
         && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) {
       return target;
     }
-    List<INode> children = targetDir.getChildren();
-    if (children != null) {
-      for (INode child : children) {
-        INode snapshotDir = hasSnapshot(child);
-        if (snapshotDir != null) {
-          return snapshotDir;
-        }
+    for (INode child : targetDir.getChildrenList(null)) {
+      INode snapshotDir = hasSnapshot(child);
+      if (snapshotDir != null) {
+        return snapshotDir;
       }
     }
   }
@@ -1195,7 +1196,7 @@ public class FSDirectory implements Closeable {
       replaceINodeUnsynced(path, oldnode, newnode);
 
       //update children's parent directory
-      for(INode i : newnode.getChildrenList()) {
+      for(INode i : newnode.getChildrenList(null)) {
         i.parent = newnode;
       }
     } finally {
@@ -1239,7 +1240,8 @@ public class FSDirectory implements Closeable {
 
     readLock();
     try {
-      INode targetNode = rootDir.getNode(srcs, true);
+      final INodesInPath inodesInPath = rootDir.getINodesInPath(srcs, true);
+      final INode targetNode = inodesInPath.getINode(0);
       if (targetNode == null)
         return null;
 
@@ -1248,8 +1250,10 @@ public class FSDirectory implements Closeable {
           new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
               targetNode, needLocation)}, 0);
       }
 
       INodeDirectory dirInode = (INodeDirectory)targetNode;
-      List<INode> contents = dirInode.getChildrenList();
+      final ReadOnlyList<INode> contents = dirInode.getChildrenList(
+          inodesInPath.getPathSnapshot());
       int startChild = dirInode.nextChild(startAfter);
       int totalNumChildren = contents.size();
       int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit);
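
Once the TODO in getChildrenList is filled in, this listing path is what will make a snapshot directory listing differ from the live one. A hypothetical end-to-end illustration (the paths, the snapshot name "s1", and the behavior of listing under ".snapshot" are assumptions about where the branch is headed, not what this commit already delivers):

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ListingSketch {
  // Lists the live children and the (assumed) snapshot view side by side.
  static void listBothViews(FileSystem fs) throws IOException {
    FileStatus[] live = fs.listStatus(new Path("/data"));
    FileStatus[] snap = fs.listStatus(new Path("/data/.snapshot/s1"));
    System.out.println(live.length + " entries now, " + snap.length + " in s1");
  }
}
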
@@ -1738,7 +1742,7 @@ public class FSDirectory implements Closeable {
       }
       if (maxDirItems != 0) {
         INodeDirectory parent = (INodeDirectory)pathComponents[pos-1];
-        int count = parent.getChildrenList().size();
+        int count = parent.getChildrenList(null).size();
         if (count >= maxDirItems) {
           throw new MaxDirectoryItemsExceededException(maxDirItems, count);
         }
@@ -1881,7 +1885,7 @@ public class FSDirectory implements Closeable {
      * INode. using 'parent' is not currently recommended. */
     nodesInPath.add(dir);
 
-    for (INode child : dir.getChildrenList()) {
+    for (INode child : dir.getChildrenList(null)) {
       if (child.isDirectory()) {
         updateCountForINodeWithQuota((INodeDirectory)child,
             counts, nodesInPath);

FSImageFormat.java
@@ -30,7 +30,6 @@ import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.util.Arrays;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
 
@@ -530,9 +530,10 @@ class FSImageFormat {
     private void saveImage(ByteBuffer currentDirName,
                            INodeDirectory current,
                            DataOutputStream out) throws IOException {
-      List<INode> children = current.getChildren();
-      if (children == null || children.isEmpty())
+      final ReadOnlyList<INode> children = current.getChildrenList(null);
+      if (children.isEmpty()) {
         return;
+      }
       // print prefix (parent directory name)
       int prefixLen = currentDirName.position();
       if (prefixLen == 0) { // root
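
The image saver now goes through the same accessor with a null snapshot (the current tree is what gets persisted), and since the accessor never returns null the old null-or-empty guard reduces to an emptiness test. The traversal shape, simplified to a sketch (the serialization body is elided; the class and method names are illustrative):

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

class SaveImageSketch {
  // Visits the current (null-snapshot) children, recursing into directories.
  static void visit(INodeDirectory current) {
    final ReadOnlyList<INode> children = current.getChildrenList(null);
    if (children.isEmpty()) {
      return;
    }
    for (INode child : children) {
      // the real saveImage writes each child to the image stream here
      if (child.isDirectory()) {
        visit((INodeDirectory) child);
      }
    }
  }
}
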

FSPermissionChecker.java
@@ -28,6 +28,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -121,7 +123,8 @@ class FSPermissionChecker {
     }
     // check if (parentAccess != null) && file exists, then check sb
     // Resolve symlinks, the check is performed on the link target.
-    final INode[] inodes = root.getExistingPathINodes(path, true).getINodes();
+    final INodesInPath inodesInPath = root.getExistingPathINodes(path, true);
+    final INode[] inodes = inodesInPath.getINodes();
     int ancestorIndex = inodes.length - 2;
     for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
         ancestorIndex--);
@@ -141,7 +144,8 @@ class FSPermissionChecker {
       check(inodes[inodes.length - 1], access);
     }
     if (subAccess != null) {
-      checkSubAccess(inodes[inodes.length - 1], subAccess);
+      final Snapshot s = inodesInPath.getPathSnapshot();
+      checkSubAccess(inodes[inodes.length - 1], s, subAccess);
     }
     if (doCheckOwner) {
       checkOwner(inodes[inodes.length - 1]);
@@ -162,7 +166,7 @@ class FSPermissionChecker {
     }
   }
 
-  private void checkSubAccess(INode inode, FsAction access
+  private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
       ) throws AccessControlException {
     if (inode == null || !inode.isDirectory()) {
       return;
@@ -173,7 +177,7 @@ class FSPermissionChecker {
       INodeDirectory d = directories.pop();
       check(d, access);
 
-      for(INode child : d.getChildrenList()) {
+      for(INode child : d.getChildrenList(snapshot)) {
         if (child.isDirectory()) {
           directories.push((INodeDirectory)child);
         }
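
The snapshot is resolved once from the path and then threaded through checkSubAccess, so the whole subtree is checked against a single consistent view rather than a mix of live and snapshot children. The walk, reduced to its shape (a sketch: the permission test itself is elided, a Deque stands in for the original's stack, and the class name is illustrative):

import java.util.ArrayDeque;
import java.util.Deque;

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

class SubtreeWalkSketch {
  // One Snapshot chosen from the path is reused at every directory level.
  static void walk(INodeDirectory root, Snapshot snapshot) {
    final Deque<INodeDirectory> directories = new ArrayDeque<INodeDirectory>();
    directories.push(root);
    while (!directories.isEmpty()) {
      final INodeDirectory d = directories.pop();
      // check(d, access) runs here in the real code
      for (INode child : d.getChildrenList(snapshot)) {
        if (child.isDirectory()) {
          directories.push((INodeDirectory) child);
        }
      }
    }
  }
}
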

INode.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -48,7 +49,11 @@ import com.google.common.primitives.SignedBytes;
  */
 @InterfaceAudience.Private
 public abstract class INode implements Comparable<byte[]> {
-  static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>());
+  static final List<INode> EMPTY_LIST
+      = Collections.unmodifiableList(new ArrayList<INode>());
+  static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST
+      = ReadOnlyList.Util.asReadOnlyList(EMPTY_LIST);
 
   /**
    * The inode name is in java UTF8 encoding;
    * The name in HdfsFileStatus should keep the same encoding as this.
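
EMPTY_READ_ONLY_LIST is a single shared immutable view, so callers can always receive a non-null result without a per-call allocation. The pattern it enables, mirroring getChildrenList further down (a sketch; it sits in the namenode package because the field is package-private):

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

class EmptyViewSketch {
  // Null children collapse to the shared empty view instead of null.
  static ReadOnlyList<INode> viewOf(List<INode> children) {
    return children == null ? INode.EMPTY_READ_ONLY_LIST
        : ReadOnlyList.Util.asReadOnlyList(children);
  }
}
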

INodeDirectory.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -55,16 +56,14 @@ public class INodeDirectory extends INode {
   protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
   final static String ROOT_NAME = "";
 
-  private List<INode> children;
+  private List<INode> children = null;
 
   public INodeDirectory(String name, PermissionStatus permissions) {
     super(name, permissions);
-    this.children = null;
   }
 
   public INodeDirectory(PermissionStatus permissions, long mTime) {
     super(permissions, mTime, 0);
-    this.children = null;
   }
 
   /** constructor */
|
@ -79,7 +78,7 @@ public class INodeDirectory extends INode {
|
||||||
*/
|
*/
|
||||||
public INodeDirectory(INodeDirectory other) {
|
public INodeDirectory(INodeDirectory other) {
|
||||||
super(other);
|
super(other);
|
||||||
this.children = other.getChildren();
|
this.children = other.children;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** @return true unconditionally. */
|
/** @return true unconditionally. */
|
||||||
|
@ -118,39 +117,23 @@ public class INodeDirectory extends INode {
|
||||||
throw new IllegalArgumentException("No child exists to be replaced");
|
throw new IllegalArgumentException("No child exists to be replaced");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
INode getChild(String name) {
|
private INode getChild(byte[] name, Snapshot snapshot) {
|
||||||
return getChildINode(DFSUtil.string2Bytes(name));
|
final ReadOnlyList<INode> c = getChildrenList(snapshot);
|
||||||
|
final int i = ReadOnlyList.Util.binarySearch(c, name);
|
||||||
|
return i < 0? null: c.get(i);
|
||||||
}
|
}
|
||||||
|
|
||||||
private INode getChildINode(byte[] name) {
|
/** @return the {@link INodesInPath} containing only the last inode. */
|
||||||
if (children == null) {
|
INodesInPath getINodesInPath(String path, boolean resolveLink
|
||||||
return null;
|
|
||||||
}
|
|
||||||
int low = Collections.binarySearch(children, name);
|
|
||||||
if (low >= 0) {
|
|
||||||
return children.get(low);
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @return the INode of the last component in components, or null if the last
|
|
||||||
* component does not exist.
|
|
||||||
*/
|
|
||||||
private INode getNode(byte[][] components, boolean resolveLink
|
|
||||||
) throws UnresolvedLinkException {
|
) throws UnresolvedLinkException {
|
||||||
INodesInPath inodesInPath = getExistingPathINodes(components, 1,
|
return getExistingPathINodes(getPathComponents(path), 1, resolveLink);
|
||||||
resolveLink);
|
|
||||||
return inodesInPath.inodes[0];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/** @return the last inode in the path. */
|
||||||
* This is the external interface
|
|
||||||
*/
|
|
||||||
INode getNode(String path, boolean resolveLink)
|
INode getNode(String path, boolean resolveLink)
|
||||||
throws UnresolvedLinkException {
|
throws UnresolvedLinkException {
|
||||||
return getNode(getPathComponents(path), resolveLink);
|
return getINodesInPath(path, resolveLink).getINode(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
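
getChild leans on the children being kept in ascending name order (INode implements Comparable<byte[]>, as the old Collections.binarySearch call already assumed), so a binary search over the read-only view is enough. The same lookup idiom in isolation, as a self-contained sketch (the class and method names are illustrative):

import java.util.List;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

class BinarySearchSketch {
  // Requires 'sorted' to be in ascending order with respect to compareTo(key).
  static <K, E extends Comparable<K>> E find(List<E> sorted, K key) {
    final ReadOnlyList<E> view = ReadOnlyList.Util.asReadOnlyList(sorted);
    final int i = ReadOnlyList.Util.binarySearch(view, key);
    return i < 0 ? null : view.get(i);
  }
}
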
@@ -269,7 +252,8 @@ public class INodeDirectory extends INode {
         }
       } else {
         // normal case, and also for resolving file/dir under snapshot root
-        curNode = parentDir.getChildINode(components[count + 1]);
+        curNode = parentDir.getChild(components[count + 1],
+            existing.getPathSnapshot());
       }
       count++;
       index++;
|
@ -470,16 +454,14 @@ public class INodeDirectory extends INode {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @return an empty list if the children list is null;
|
* @return the current children list if the specified snapshot is null;
|
||||||
* otherwise, return the children list.
|
* otherwise, return the children list corresponding to the snapshot.
|
||||||
* The returned list should not be modified.
|
* Note that the returned list is never null.
|
||||||
*/
|
*/
|
||||||
public List<INode> getChildrenList() {
|
public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
|
||||||
return children==null ? EMPTY_LIST : children;
|
//TODO: use snapshot to select children list
|
||||||
}
|
return children == null ? EMPTY_READ_ONLY_LIST
|
||||||
/** @return the children list which is possibly null. */
|
: ReadOnlyList.Util.asReadOnlyList(children);
|
||||||
public List<INode> getChildren() {
|
|
||||||
return children;
|
|
||||||
}
|
}
|
||||||
/** Set the children list. */
|
/** Set the children list. */
|
||||||
public void setChildren(List<INode> children) {
|
public void setChildren(List<INode> children) {
|
||||||
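
Note the TODO: at this commit the snapshot argument is plumbed through every caller but not yet consulted, so both calls below return the same view of the current children; the per-snapshot selection presumably arrives later via INodeDirectoryWithSnapshot. A sketch (the dir and snapshot variables and the class name are assumptions for illustration):

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

class TodoSketch {
  static void show(INodeDirectory dir, Snapshot snapshot) {
    ReadOnlyList<INode> current = dir.getChildrenList(null);
    ReadOnlyList<INode> inSnapshot = dir.getChildrenList(snapshot);
    // For now both are the same view; only the plumbing is in place.
    System.out.println(current.size() == inSnapshot.size());
  }
}
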
|
@ -545,11 +527,19 @@ public class INodeDirectory extends INode {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @return the snapshot associated to the path.
|
* For non-snapshot paths, return the latest snapshot found in the path.
|
||||||
* @see #snapshot
|
* For snapshot paths, return null.
|
||||||
*/
|
*/
|
||||||
public Snapshot getSnapshot() {
|
public Snapshot getLatestSnapshot() {
|
||||||
return snapshot;
|
return isSnapshot? null: snapshot;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For snapshot paths, return the snapshot specified in the path.
|
||||||
|
* For non-snapshot paths, return null.
|
||||||
|
*/
|
||||||
|
public Snapshot getPathSnapshot() {
|
||||||
|
return isSnapshot? snapshot: null;
|
||||||
}
|
}
|
||||||
|
|
||||||
private void setSnapshot(Snapshot s) {
|
private void setSnapshot(Snapshot s) {
|
||||||
|
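
The two accessors partition the old getSnapshot(): for a snapshot path, getPathSnapshot() carries the snapshot named in the path and getLatestSnapshot() is null; for a regular path it is the other way around (compare the TestSnapshotPathINodes assertions at the end of this diff). At most one of them is non-null, which the following sketch captures as a predicate (illustrative only; it sits in the namenode package to reach the nested class):

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;

class SnapshotAccessorsSketch {
  // At most one of the two accessors is non-null for a resolved path.
  static boolean consistent(INodesInPath p) {
    return p.getPathSnapshot() == null || p.getLatestSnapshot() == null;
  }
}
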
@@ -576,6 +566,11 @@ public class INodeDirectory extends INode {
       return inodes;
     }
 
+    /** @return the i-th inode. */
+    INode getINode(int i) {
+      return inodes[i];
+    }
+
     /**
      * @return index of the {@link INodeDirectoryWithSnapshot} in
      * {@link #inodes} for snapshot path, else -1.
@@ -626,7 +621,7 @@ public class INodeDirectory extends INode {
         for(int i = 1; i < inodes.length; i++) {
           b.append(", ").append(toString(inodes[i]));
         }
-        b.append("]");
+        b.append("], length=").append(inodes.length);
       }
       b.append("\n numNonNull = ").append(numNonNull)
           .append("\n capacity = ").append(capacity)

SnapshotManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 /** Manage snapshottable directories and their snapshots. */
 public class SnapshotManager implements SnapshotStats {
|
@ -124,10 +125,10 @@ public class SnapshotManager implements SnapshotStats {
|
||||||
/** Process snapshot creation recursively. */
|
/** Process snapshot creation recursively. */
|
||||||
private void processRecursively(final INodeDirectory srcDir,
|
private void processRecursively(final INodeDirectory srcDir,
|
||||||
final INodeDirectory dstDir) throws IOException {
|
final INodeDirectory dstDir) throws IOException {
|
||||||
final List<INode> children = srcDir.getChildren();
|
final ReadOnlyList<INode> children = srcDir.getChildrenList(null);
|
||||||
if (children != null) {
|
if (!children.isEmpty()) {
|
||||||
final List<INode> inodes = new ArrayList<INode>(children.size());
|
final List<INode> inodes = new ArrayList<INode>(children.size());
|
||||||
for(final INode c : new ArrayList<INode>(children)) {
|
for(final INode c : new ArrayList<INode>(ReadOnlyList.Util.asList(children))) {
|
||||||
final INode i;
|
final INode i;
|
||||||
if (c == null) {
|
if (c == null) {
|
||||||
i = null;
|
i = null;
|
||||||
|
|
|
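
The copy through new ArrayList<INode>(ReadOnlyList.Util.asList(children)) works because the List view implements toArray(), which ArrayList's copy constructor uses, while the view's mutators throw UnsupportedOperationException, preserving the read-only semantics. A self-contained sketch of that behavior (class name illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

class CopySketch {
  public static void main(String[] args) {
    ReadOnlyList<String> ro =
        ReadOnlyList.Util.asReadOnlyList(Arrays.asList("a", "b"));
    List<String> view = ReadOnlyList.Util.asList(ro);
    List<String> copy = new ArrayList<String>(view); // fine: uses view.toArray()
    copy.add("c");                                   // the copy is mutable
    // view.add("c") would throw UnsupportedOperationException
    System.out.println(copy);
  }
}
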

ReadOnlyList.java (new file)
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A {@link ReadOnlyList} is a unmodifiable list,
+ * which supports read-only operations.
+ *
+ * @param <E> The type of the list elements.
+ */
+@InterfaceAudience.Private
+public interface ReadOnlyList<E> extends Iterable<E> {
+  /**
+   * Is this an empty list?
+   */
+  boolean isEmpty();
+
+  /**
+   * @return the size of this list.
+   */
+  int size();
+
+  /**
+   * @return the i-th element.
+   */
+  E get(int i);
+
+  /**
+   * Utilities for {@link ReadOnlyList}
+   */
+  public static class Util {
+    /**
+     * The same as {@link Collections#binarySearch(List, Object)}
+     * except that the list is a {@link ReadOnlyList}.
+     *
+     * @return the insertion point defined
+     *         in {@link Collections#binarySearch(List, Object)}.
+     */
+    public static <K, E extends Comparable<K>> int binarySearch(
+        final ReadOnlyList<E> list, final K key) {
+      return Collections.binarySearch(asList(list), key);
+    }
+
+    /**
+     * @return a {@link ReadOnlyList} view of the given list.
+     */
+    public static <E> ReadOnlyList<E> asReadOnlyList(final List<E> list) {
+      return new ReadOnlyList<E>() {
+        @Override
+        public Iterator<E> iterator() {
+          return list.iterator();
+        }
+
+        @Override
+        public boolean isEmpty() {
+          return list.isEmpty();
+        }
+
+        @Override
+        public int size() {
+          return list.size();
+        }
+
+        @Override
+        public E get(int i) {
+          return list.get(i);
+        }
+      };
+    }
+
+    /**
+     * @return a {@link List} view of the given list.
+     */
+    public static <E> List<E> asList(final ReadOnlyList<E> list) {
+      return new List<E>() {
+        @Override
+        public Iterator<E> iterator() {
+          return list.iterator();
+        }
+
+        @Override
+        public boolean isEmpty() {
+          return list.isEmpty();
+        }
+
+        @Override
+        public int size() {
+          return list.size();
+        }
+
+        @Override
+        public E get(int i) {
+          return list.get(i);
+        }
+
+        @Override
+        public Object[] toArray() {
+          final Object[] a = new Object[size()];
+          for(int i = 0; i < a.length; i++) {
+            a[i] = get(i);
+          }
+          return a;
+        }
+
+        //All methods below are not supported.
+
+        @Override
+        public boolean add(E e) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void add(int index, E element) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean addAll(Collection<? extends E> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean addAll(int index, Collection<? extends E> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void clear() {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean contains(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean containsAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int indexOf(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int lastIndexOf(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ListIterator<E> listIterator() {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ListIterator<E> listIterator(int index) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean remove(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public E remove(int index) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean removeAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean retainAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public E set(int index, E element) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public List<E> subList(int fromIndex, int toIndex) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> T[] toArray(T[] a) {
+          throw new UnsupportedOperationException();
+        }
+      };
+    }
+  }
+}
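
A quick usage example of the new utility, self-contained given the class above (the example class name and values are illustrative):

import java.util.Arrays;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

public class ReadOnlyListExample {
  public static void main(String[] args) {
    final ReadOnlyList<String> names =
        ReadOnlyList.Util.asReadOnlyList(Arrays.asList("a", "b", "d"));
    System.out.println(names.size());                               // 3
    System.out.println(ReadOnlyList.Util.binarySearch(names, "b")); // 1
    System.out.println(ReadOnlyList.Util.binarySearch(names, "c")); // -3, i.e. -(insertion point) - 1
  }
}
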
TestSnapshotPathINodes.java
@@ -118,7 +118,8 @@ public class TestSnapshotPathINodes {
       final Snapshot snapshot, int index) {
     assertEquals(isSnapshot, inodesInPath.isSnapshot());
     assertEquals(index, inodesInPath.getSnapshotRootIndex());
-    assertEquals(snapshot, inodesInPath.getSnapshot());
+    assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
+    assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
   }
 
   /**