HDFS-4550. Refactor INodeDirectory.INodesInPath to a standalone class.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1468725 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2013-04-17 02:41:38 +00:00
parent 38bd7061c1
commit 9280468b1a
12 changed files with 445 additions and 422 deletions


@@ -247,3 +247,6 @@ Branch-2802 Snapshot (Unreleased)
  HDFS-4529. Disallow concat when one of the src files is in some snapshot.
  (szetszwo)
+ HDFS-4550. Refactor INodeDirectory.INodesInPath to a standalone class.
+ (szetszwo)


@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
@@ -1536,7 +1535,7 @@ public class FSDirectory implements Closeable {
  INodesInPath getExistingPathINodes(byte[][] components)
      throws UnresolvedLinkException {
-    return rootDir.getExistingPathINodes(components, components.length, false);
+    return INodesInPath.resolve(rootDir, components);
  }
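For reference, a minimal usage sketch (not itself part of the diff) of the equivalent resolution through the new standalone class. It assumes code living in org.apache.hadoop.hdfs.server.namenode, since the resolve() overloads are package-private, and an FSDirectory instance named fsdir; both are illustrative.

    // The two-argument resolve() defaults to numOfINodes = components.length
    // and resolveLink = false, matching the old call
    // rootDir.getExistingPathINodes(components, components.length, false).
    byte[][] components = INode.getPathComponents("/foo/bar"); // {"", "foo", "bar"}
    INodesInPath iip = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = iip.getINodes(); // one slot per component; slots for
                                      // components that do not exist stay null
    INode last = iip.getLastINode();  // null if /foo/bar does not exist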
  /**


@@ -64,7 +64,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;


@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;


@@ -171,7 +171,6 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;


@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;


@@ -600,24 +600,6 @@ public abstract class INode implements Diff.Element<byte[]> {
    return StringUtils.split(path, Path.SEPARATOR_CHAR);
  }
/**
* Given some components, create a path name.
* @param components The path components
* @param start index
* @param end index
* @return concatenated path
*/
static String constructPath(byte[][] components, int start, int end) {
StringBuilder buf = new StringBuilder();
for (int i = start; i < end; i++) {
buf.append(DFSUtil.bytes2String(components[i]));
if (i < end - 1) {
buf.append(Path.SEPARATOR);
}
}
return buf.toString();
}
  @Override
  public final int compareTo(byte[] bytes) {
    final byte[] name = getLocalNameBytes();


@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -29,9 +28,7 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
@@ -309,14 +306,14 @@ public class INodeDirectory extends INodeWithAdditionalFields {
  /** @return the {@link INodesInPath} containing only the last inode. */
  INodesInPath getLastINodeInPath(String path, boolean resolveLink
      ) throws UnresolvedLinkException {
-    return getExistingPathINodes(getPathComponents(path), 1, resolveLink);
+    return INodesInPath.resolve(this, getPathComponents(path), 1, resolveLink);
  }

  /** @return the {@link INodesInPath} containing all inodes in the path. */
  INodesInPath getINodesInPath(String path, boolean resolveLink
      ) throws UnresolvedLinkException {
    final byte[][] components = getPathComponents(path);
-    return getExistingPathINodes(components, components.length, resolveLink);
+    return INodesInPath.resolve(this, components, components.length, resolveLink);
  }
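The numOfINodes argument trims the result to the trailing inodes, which is what getLastINodeInPath relies on. A brief illustrative sketch (not part of the diff) of the two common cases, mirroring the TestSnapshotPathINodes changes further down; "root" stands for the root INodeDirectory (for example fsdir.rootDir) and is an assumption.

    byte[][] components = INode.getPathComponents("/foo/bar"); // {"", "foo", "bar"}

    // Request only the last inode, as getLastINodeInPath(path, ...) does:
    INodesInPath last = INodesInPath.resolve(root, components, 1, false);
    // last.getINodes().length == 1; last.getINode(-1) is "bar"'s inode, or null.

    // Request the last two inodes, typically the parent plus the target:
    INodesInPath pair = INodesInPath.resolve(root, components, 2, false);
    // pair.getINode(-2) is "foo"'s inode; pair.getINode(-1) is "bar"'s inode.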
  /** @return the last inode in the path. */
@@ -344,7 +341,7 @@ public class INodeDirectory extends INodeWithAdditionalFields {
  INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
      throws UnresolvedLinkException, SnapshotAccessControlException {
    final byte[][] components = INode.getPathComponents(src);
-    INodesInPath inodesInPath = getExistingPathINodes(components,
+    INodesInPath inodesInPath = INodesInPath.resolve(this, components,
        components.length, resolveLink);
    if (inodesInPath.isSnapshot()) {
      throw new SnapshotAccessControlException(
@@ -353,170 +350,6 @@ public class INodeDirectory extends INodeWithAdditionalFields {
    return inodesInPath;
  }
/**
* Retrieve existing INodes from a path. If existing is big enough to store
* all path components (existing and non-existing), then existing INodes
* will be stored starting from the root INode into existing[0]; if
* existing is not big enough to store all path components, then only the
* last existing and non existing INodes will be stored so that
* existing[existing.length-1] refers to the INode of the final component.
*
* An UnresolvedPathException is always thrown when an intermediate path
* component refers to a symbolic link. If the final path component refers
* to a symbolic link then an UnresolvedPathException is only thrown if
* resolveLink is true.
*
* <p>
* Example: <br>
* Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
* following path components: ["","c1","c2","c3"],
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
* array with [c2] <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
* array with [null]
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
* array with [c1,c2] <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
* the array with [c2,null]
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
* the array with [rootINode,c1,c2,null], <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
* fill the array with [rootINode,c1,c2,null]
*
* @param components array of path component name
* @param numOfINodes number of INodes to return
* @param resolveLink indicates whether UnresolvedLinkException should
* be thrown when the path refers to a symbolic link.
* @return the specified number of existing INodes in the path
*/
INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
boolean resolveLink) throws UnresolvedLinkException {
assert this.compareTo(components[0]) == 0 :
"Incorrect name " + getLocalName() + " expected "
+ (components[0] == null? null: DFSUtil.bytes2String(components[0]));
INodesInPath existing = new INodesInPath(components, numOfINodes);
INode curNode = this;
int count = 0;
int index = numOfINodes - components.length;
if (index > 0) {
index = 0;
}
while (count < components.length && curNode != null) {
final boolean lastComp = (count == components.length - 1);
if (index >= 0) {
existing.addNode(curNode);
}
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!existing.isSnapshot()) {
existing.updateLatestSnapshot(
((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
// 1. if the existing snapshot is no later than the dstSnapshot (which
// is the latest snapshot in dst before the rename), the changes
// should be recorded in previous snapshots (belonging to src).
// 2. however, if the ref node is already the last component, we still
// need to know the latest snapshot among the ref node's ancestors,
// in case of processing a deletion operation. Thus we do not overwrite
// the latest snapshot if lastComp is true. In case of the operation is
// a modification operation, we do a similar check in corresponding
// recordModification method.
if (!existing.isSnapshot()) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
Snapshot latest = existing.getLatestSnapshot();
if (latest == null || // no snapshot in dst tree of rename
dstSnapshotId >= latest.getId()) { // the above scenario
Snapshot lastSnapshot = null;
if (curNode.isDirectory()
&& curNode.asDirectory() instanceof INodeDirectoryWithSnapshot) {
lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
.asDirectory()).getLastSnapshot();
} else if (curNode.isFile()
&& curNode.asFile() instanceof INodeFileWithSnapshot) {
lastSnapshot = ((INodeFileWithSnapshot) curNode
.asFile()).getDiffs().getLastSnapshot();
}
existing.setSnapshot(lastSnapshot);
}
}
}
if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
final String path = constructPath(components, 0, components.length);
final String preceding = constructPath(components, 0, count);
final String remainder =
constructPath(components, count + 1, components.length);
final String link = DFSUtil.bytes2String(components[count]);
final String target = curNode.asSymlink().getSymlinkString();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("UnresolvedPathException " +
" path: " + path + " preceding: " + preceding +
" count: " + count + " link: " + link + " target: " + target +
" remainder: " + remainder);
}
throw new UnresolvedPathException(path, preceding, remainder, target);
}
if (lastComp || !isDir) {
break;
}
final byte[] childName = components[count + 1];
// check if the next byte[] in components is for ".snapshot"
if (isDotSnapshotDir(childName)
&& isDir && dir instanceof INodeDirectoryWithSnapshot) {
// skip the ".snapshot" in components
count++;
index++;
existing.isSnapshot = true;
if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
existing.capacity--;
}
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
break;
}
// Resolve snapshot root
final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
components[count + 1]);
if (s == null) {
//snapshot not found
curNode = null;
} else {
curNode = s.getRoot();
existing.setSnapshot(s);
}
if (index >= -1) {
existing.snapshotRootIndex = existing.numNonNull;
}
} else {
// normal case, and also for resolving file/dir under snapshot root
curNode = dir.getChild(childName, existing.getPathSnapshot());
}
count++;
index++;
}
return existing;
}
/**
* @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
*/
private static boolean isDotSnapshotDir(byte[] pathComponent) {
return pathComponent == null ? false
: Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
}
  /**
   * Given a child's name, return the index of the next child
   *
@@ -714,207 +547,6 @@ public class INodeDirectory extends INodeWithAdditionalFields {
        && getFsPermission().equals(other.getFsPermission());
  }
/**
* Used by
* {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
* Contains INodes information resolved from a given path.
*/
public static class INodesInPath {
private final byte[][] path;
/**
* Array with the specified number of INodes resolved for a given path.
*/
private INode[] inodes;
/**
* Indicate the number of non-null elements in {@link #inodes}
*/
private int numNonNull;
/**
* The path for a snapshot file/dir contains the .snapshot thus makes the
* length of the path components larger the number of inodes. We use
* the capacity to control this special case.
*/
private int capacity;
/**
* true if this path corresponds to a snapshot
*/
private boolean isSnapshot;
/**
* Index of {@link INodeDirectoryWithSnapshot} for snapshot path, else -1
*/
private int snapshotRootIndex;
/**
* For snapshot paths, it is the reference to the snapshot; or null if the
* snapshot does not exist. For non-snapshot paths, it is the reference to
* the latest snapshot found in the path; or null if no snapshot is found.
*/
private Snapshot snapshot = null;
private INodesInPath(byte[][] path, int number) {
this.path = path;
assert (number >= 0);
inodes = new INode[number];
capacity = number;
numNonNull = 0;
isSnapshot = false;
snapshotRootIndex = -1;
}
/**
* For non-snapshot paths, return the latest snapshot found in the path.
* For snapshot paths, return null.
*/
public Snapshot getLatestSnapshot() {
return isSnapshot? null: snapshot;
}
/**
* For snapshot paths, return the snapshot specified in the path.
* For non-snapshot paths, return null.
*/
public Snapshot getPathSnapshot() {
return isSnapshot? snapshot: null;
}
private void setSnapshot(Snapshot s) {
snapshot = s;
}
private void updateLatestSnapshot(Snapshot s) {
if (snapshot == null
|| (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
snapshot = s;
}
}
/**
* @return the whole inodes array including the null elements.
*/
INode[] getINodes() {
if (capacity < inodes.length) {
INode[] newNodes = new INode[capacity];
System.arraycopy(inodes, 0, newNodes, 0, capacity);
inodes = newNodes;
}
return inodes;
}
/**
* @return the i-th inode if i >= 0;
* otherwise, i < 0, return the (length + i)-th inode.
*/
public INode getINode(int i) {
return inodes[i >= 0? i: inodes.length + i];
}
/** @return the last inode. */
public INode getLastINode() {
return inodes[inodes.length - 1];
}
byte[] getLastLocalName() {
return path[path.length - 1];
}
/**
* @return index of the {@link INodeDirectoryWithSnapshot} in
* {@link #inodes} for snapshot path, else -1.
*/
int getSnapshotRootIndex() {
return this.snapshotRootIndex;
}
/**
* @return isSnapshot true for a snapshot path
*/
boolean isSnapshot() {
return this.isSnapshot;
}
/**
* Add an INode at the end of the array
*/
private void addNode(INode node) {
inodes[numNonNull++] = node;
}
void setINode(int i, INode inode) {
inodes[i >= 0? i: inodes.length + i] = inode;
}
void setLastINode(INode last) {
inodes[inodes.length - 1] = last;
}
/**
* @return The number of non-null elements
*/
int getNumNonNull() {
return numNonNull;
}
static String toString(INode inode) {
return inode == null? null: inode.getLocalName();
}
@Override
public String toString() {
return toString(true);
}
private String toString(boolean vaildateObject) {
if (vaildateObject) {
vaildate();
}
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append(": path = ").append(DFSUtil.byteArray2PathString(path))
.append("\n inodes = ");
if (inodes == null) {
b.append("null");
} else if (inodes.length == 0) {
b.append("[]");
} else {
b.append("[").append(toString(inodes[0]));
for(int i = 1; i < inodes.length; i++) {
b.append(", ").append(toString(inodes[i]));
}
b.append("], length=").append(inodes.length);
}
b.append("\n numNonNull = ").append(numNonNull)
.append("\n capacity = ").append(capacity)
.append("\n isSnapshot = ").append(isSnapshot)
.append("\n snapshotRootIndex = ").append(snapshotRootIndex)
.append("\n snapshot = ").append(snapshot);
return b.toString();
}
void vaildate() {
// check parent up to snapshotRootIndex or numNonNull
final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull;
int i = 0;
if (inodes[i] != null) {
for(i++; i < n && inodes[i] != null; i++) {
final INodeDirectory parent_i = inodes[i].getParent();
final INodeDirectory parent_i_1 = inodes[i-1].getParent();
if (parent_i != inodes[i-1] &&
(parent_i_1 == null || !parent_i_1.isSnapshottable()
|| parent_i != parent_i_1)) {
throw new AssertionError(
"inodes[" + i + "].getParent() != inodes[" + (i-1)
+ "]\n inodes[" + i + "]=" + inodes[i].toDetailString()
+ "\n inodes[" + (i-1) + "]=" + inodes[i-1].toDetailString()
+ "\n this=" + toString(false));
}
}
}
if (i != n) {
throw new AssertionError("i = " + i + " != " + n
+ ", this=" + toString(false));
}
}
}
  /*
   * The following code is to dump the tree recursively for testing.
   *


@@ -0,0 +1,422 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
/**
* Contains INodes information resolved from a given path.
*/
public class INodesInPath {
public static final Log LOG = LogFactory.getLog(INodesInPath.class);
/**
* @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
*/
private static boolean isDotSnapshotDir(byte[] pathComponent) {
return pathComponent == null ? false
: Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
}
/**
* Given some components, create a path name.
* @param components The path components
* @param start index
* @param end index
* @return concatenated path
*/
private static String constructPath(byte[][] components, int start, int end) {
StringBuilder buf = new StringBuilder();
for (int i = start; i < end; i++) {
buf.append(DFSUtil.bytes2String(components[i]));
if (i < end - 1) {
buf.append(Path.SEPARATOR);
}
}
return buf.toString();
}
static INodesInPath resolve(final INodeDirectory startingDir,
final byte[][] components) throws UnresolvedLinkException {
return resolve(startingDir, components, components.length, false);
}
/**
* Retrieve existing INodes from a path. If existing is big enough to store
* all path components (existing and non-existing), then existing INodes
* will be stored starting from the root INode into existing[0]; if
* existing is not big enough to store all path components, then only the
* last existing and non existing INodes will be stored so that
* existing[existing.length-1] refers to the INode of the final component.
*
* An UnresolvedPathException is always thrown when an intermediate path
* component refers to a symbolic link. If the final path component refers
* to a symbolic link then an UnresolvedPathException is only thrown if
* resolveLink is true.
*
* <p>
* Example: <br>
* Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
* following path components: ["","c1","c2","c3"],
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
* array with [c2] <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
* array with [null]
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
* array with [c1,c2] <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
* the array with [c2,null]
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
* the array with [rootINode,c1,c2,null], <br>
* <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
* fill the array with [rootINode,c1,c2,null]
*
* @param startingDir the starting directory
* @param components array of path component name
* @param numOfINodes number of INodes to return
* @param resolveLink indicates whether UnresolvedLinkException should
* be thrown when the path refers to a symbolic link.
* @return the specified number of existing INodes in the path
*/
static INodesInPath resolve(final INodeDirectory startingDir,
final byte[][] components, final int numOfINodes,
final boolean resolveLink) throws UnresolvedLinkException {
Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
INode curNode = startingDir;
final INodesInPath existing = new INodesInPath(components, numOfINodes);
int count = 0;
int index = numOfINodes - components.length;
if (index > 0) {
index = 0;
}
while (count < components.length && curNode != null) {
final boolean lastComp = (count == components.length - 1);
if (index >= 0) {
existing.addNode(curNode);
}
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!existing.isSnapshot()) {
existing.updateLatestSnapshot(
((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
// 1. if the existing snapshot is no later than the dstSnapshot (which
// is the latest snapshot in dst before the rename), the changes
// should be recorded in previous snapshots (belonging to src).
// 2. however, if the ref node is already the last component, we still
// need to know the latest snapshot among the ref node's ancestors,
// in case of processing a deletion operation. Thus we do not overwrite
// the latest snapshot if lastComp is true. In case of the operation is
// a modification operation, we do a similar check in corresponding
// recordModification method.
if (!existing.isSnapshot()) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
Snapshot latest = existing.getLatestSnapshot();
if (latest == null || // no snapshot in dst tree of rename
dstSnapshotId >= latest.getId()) { // the above scenario
Snapshot lastSnapshot = null;
if (curNode.isDirectory()
&& curNode.asDirectory() instanceof INodeDirectoryWithSnapshot) {
lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
.asDirectory()).getLastSnapshot();
} else if (curNode.isFile()
&& curNode.asFile() instanceof INodeFileWithSnapshot) {
lastSnapshot = ((INodeFileWithSnapshot) curNode
.asFile()).getDiffs().getLastSnapshot();
}
existing.setSnapshot(lastSnapshot);
}
}
}
if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
final String path = constructPath(components, 0, components.length);
final String preceding = constructPath(components, 0, count);
final String remainder =
constructPath(components, count + 1, components.length);
final String link = DFSUtil.bytes2String(components[count]);
final String target = curNode.asSymlink().getSymlinkString();
if (LOG.isDebugEnabled()) {
LOG.debug("UnresolvedPathException " +
" path: " + path + " preceding: " + preceding +
" count: " + count + " link: " + link + " target: " + target +
" remainder: " + remainder);
}
throw new UnresolvedPathException(path, preceding, remainder, target);
}
if (lastComp || !isDir) {
break;
}
final byte[] childName = components[count + 1];
// check if the next byte[] in components is for ".snapshot"
if (isDotSnapshotDir(childName)
&& isDir && dir instanceof INodeDirectoryWithSnapshot) {
// skip the ".snapshot" in components
count++;
index++;
existing.isSnapshot = true;
if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
existing.capacity--;
}
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
break;
}
// Resolve snapshot root
final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
components[count + 1]);
if (s == null) {
//snapshot not found
curNode = null;
} else {
curNode = s.getRoot();
existing.setSnapshot(s);
}
if (index >= -1) {
existing.snapshotRootIndex = existing.numNonNull;
}
} else {
// normal case, and also for resolving file/dir under snapshot root
curNode = dir.getChild(childName, existing.getPathSnapshot());
}
count++;
index++;
}
return existing;
}
private final byte[][] path;
/**
* Array with the specified number of INodes resolved for a given path.
*/
private INode[] inodes;
/**
* Indicate the number of non-null elements in {@link #inodes}
*/
private int numNonNull;
/**
* The path for a snapshot file/dir contains the .snapshot thus makes the
* length of the path components larger the number of inodes. We use
* the capacity to control this special case.
*/
private int capacity;
/**
* true if this path corresponds to a snapshot
*/
private boolean isSnapshot;
/**
* Index of {@link INodeDirectoryWithSnapshot} for snapshot path, else -1
*/
private int snapshotRootIndex;
/**
* For snapshot paths, it is the reference to the snapshot; or null if the
* snapshot does not exist. For non-snapshot paths, it is the reference to
* the latest snapshot found in the path; or null if no snapshot is found.
*/
private Snapshot snapshot = null;
private INodesInPath(byte[][] path, int number) {
this.path = path;
assert (number >= 0);
inodes = new INode[number];
capacity = number;
numNonNull = 0;
isSnapshot = false;
snapshotRootIndex = -1;
}
/**
* For non-snapshot paths, return the latest snapshot found in the path.
* For snapshot paths, return null.
*/
public Snapshot getLatestSnapshot() {
return isSnapshot? null: snapshot;
}
/**
* For snapshot paths, return the snapshot specified in the path.
* For non-snapshot paths, return null.
*/
public Snapshot getPathSnapshot() {
return isSnapshot? snapshot: null;
}
private void setSnapshot(Snapshot s) {
snapshot = s;
}
private void updateLatestSnapshot(Snapshot s) {
if (snapshot == null
|| (s != null && Snapshot.ID_COMPARATOR.compare(snapshot, s) < 0)) {
snapshot = s;
}
}
/**
* @return the whole inodes array including the null elements.
*/
INode[] getINodes() {
if (capacity < inodes.length) {
INode[] newNodes = new INode[capacity];
System.arraycopy(inodes, 0, newNodes, 0, capacity);
inodes = newNodes;
}
return inodes;
}
/**
* @return the i-th inode if i >= 0;
* otherwise, i < 0, return the (length + i)-th inode.
*/
public INode getINode(int i) {
return inodes[i >= 0? i: inodes.length + i];
}
/** @return the last inode. */
public INode getLastINode() {
return inodes[inodes.length - 1];
}
byte[] getLastLocalName() {
return path[path.length - 1];
}
/**
* @return index of the {@link INodeDirectoryWithSnapshot} in
* {@link #inodes} for snapshot path, else -1.
*/
int getSnapshotRootIndex() {
return this.snapshotRootIndex;
}
/**
* @return isSnapshot true for a snapshot path
*/
boolean isSnapshot() {
return this.isSnapshot;
}
/**
* Add an INode at the end of the array
*/
private void addNode(INode node) {
inodes[numNonNull++] = node;
}
void setINode(int i, INode inode) {
inodes[i >= 0? i: inodes.length + i] = inode;
}
void setLastINode(INode last) {
inodes[inodes.length - 1] = last;
}
/**
* @return The number of non-null elements
*/
int getNumNonNull() {
return numNonNull;
}
private static String toString(INode inode) {
return inode == null? null: inode.getLocalName();
}
@Override
public String toString() {
return toString(true);
}
private String toString(boolean vaildateObject) {
if (vaildateObject) {
vaildate();
}
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append(": path = ").append(DFSUtil.byteArray2PathString(path))
.append("\n inodes = ");
if (inodes == null) {
b.append("null");
} else if (inodes.length == 0) {
b.append("[]");
} else {
b.append("[").append(toString(inodes[0]));
for(int i = 1; i < inodes.length; i++) {
b.append(", ").append(toString(inodes[i]));
}
b.append("], length=").append(inodes.length);
}
b.append("\n numNonNull = ").append(numNonNull)
.append("\n capacity = ").append(capacity)
.append("\n isSnapshot = ").append(isSnapshot)
.append("\n snapshotRootIndex = ").append(snapshotRootIndex)
.append("\n snapshot = ").append(snapshot);
return b.toString();
}
void vaildate() {
// check parent up to snapshotRootIndex or numNonNull
final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull;
int i = 0;
if (inodes[i] != null) {
for(i++; i < n && inodes[i] != null; i++) {
final INodeDirectory parent_i = inodes[i].getParent();
final INodeDirectory parent_i_1 = inodes[i-1].getParent();
if (parent_i != inodes[i-1] &&
(parent_i_1 == null || !parent_i_1.isSnapshottable()
|| parent_i != parent_i_1)) {
throw new AssertionError(
"inodes[" + i + "].getParent() != inodes[" + (i-1)
+ "]\n inodes[" + i + "]=" + inodes[i].toDetailString()
+ "\n inodes[" + (i-1) + "]=" + inodes[i-1].toDetailString()
+ "\n this=" + toString(false));
}
}
}
if (i != n) {
throw new AssertionError("i = " + i + " != " + n
+ ", this=" + toString(false));
}
}
}
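Read together with the javadoc of resolve() above, here is a hedged sketch (not part of the diff) of what a caller observes for a snapshot path versus a normal path. The directory and snapshot names are illustrative, "rootDir" stands for the root INodeDirectory, package-private access is assumed, and /dir is presumed snapshottable with an existing snapshot s1.

    // ".snapshot" is skipped, so the returned array has components.length - 1 slots.
    byte[][] components = INode.getPathComponents("/dir/.snapshot/s1/file");
    INodesInPath iip = INodesInPath.resolve(rootDir, components);

    assert iip.getINodes().length == components.length - 1;
    assert iip.isSnapshot();                  // a snapshot path
    Snapshot inPath = iip.getPathSnapshot();  // the snapshot named in the path (s1)
    assert iip.getLatestSnapshot() == null;   // only set for non-snapshot paths
    int idx = iip.getSnapshotRootIndex();     // index of the snapshot root in getINodes(), else -1

    // For a non-snapshot path under a snapshottable directory the roles flip:
    // isSnapshot() is false, getPathSnapshot() is null, and getLatestSnapshot()
    // reports the most recent snapshot seen while walking the path.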


@@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;

/**


@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
@@ -139,8 +138,7 @@ public class TestSnapshotPathINodes {
    // Get the inodes by resolving the path of a normal file
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // The number of inodes should be equal to components.length
    assertEquals(inodes.length, components.length);
@@ -159,7 +157,7 @@
    // Call getExistingPathINodes and request only one INode. This is used
    // when identifying the INode for a given path.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
    inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, 1);
    assertSnapshot(nodesInPath, false, null, -1);
@@ -167,7 +165,7 @@
    // Call getExistingPathINodes and request 2 INodes. This is usually used
    // when identifying the parent INode of a given path.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
    inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, 2);
    assertSnapshot(nodesInPath, false, null, -1);
@@ -190,8 +188,7 @@
    String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
    String[] names = INode.getPathNames(snapshotPath);
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // Length of inodes should be (components.length - 1), since we will ignore
    // ".snapshot"
@@ -206,7 +203,7 @@
        INodeDirectoryWithSnapshot);
    // Call getExistingPathINodes and request only one INode.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
    inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, 1);
    // The snapshotroot (s1) is not included in inodes. Thus the
@@ -216,7 +213,7 @@
    assertINodeFile(nodesInPath.getLastINode(), file1);
    // Call getExistingPathINodes and request 2 INodes.
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
    inodes = nodesInPath.getINodes();
    assertEquals(inodes.length, 2);
    // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
@@ -228,8 +225,7 @@
    String dotSnapshotPath = sub1.toString() + "/.snapshot";
    names = INode.getPathNames(dotSnapshotPath);
    components = INode.getPathComponents(names);
-    nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    inodes = nodesInPath.getINodes();
    // The number of INodes returned should be components.length - 1 since we
    // will ignore ".snapshot"
@@ -264,8 +260,7 @@
    String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
    String[] names = INode.getPathNames(snapshotPath);
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // Length of inodes should be (components.length - 1), since we will ignore
    // ".snapshot"
@@ -283,8 +278,7 @@
    // Check the INodes for path /TestSnapshot/sub1/file1
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // The length of inodes should be equal to components.length
    assertEquals(inodes.length, components.length);
@@ -324,8 +318,7 @@
    String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
    String[] names = INode.getPathNames(snapshotPath);
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // Length of inodes should be (components.length - 1), since we will ignore
    // ".snapshot"
@@ -345,8 +338,7 @@
    // Check the inodes for /TestSnapshot/sub1/file3
    String[] names = INode.getPathNames(file3.toString());
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // The number of inodes should be equal to components.length
    assertEquals(inodes.length, components.length);
@@ -375,8 +367,7 @@
    // First check the INode for /TestSnapshot/sub1/file1
    String[] names = INode.getPathNames(file1.toString());
    byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = fsdir.rootDir.getExistingPathINodes(components,
-        components.length, false);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] inodes = nodesInPath.getINodes();
    // The number of inodes should be equal to components.length
    assertEquals(inodes.length, components.length);
@@ -398,8 +389,7 @@
    String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
    names = INode.getPathNames(snapshotPath);
    components = INode.getPathComponents(names);
-    INodesInPath ssNodesInPath = fsdir.rootDir.getExistingPathINodes(
-        components, components.length, false);
+    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    INode[] ssInodes = ssNodesInPath.getINodes();
    // Length of ssInodes should be (components.length - 1), since we will
    // ignore ".snapshot"
@@ -418,8 +408,7 @@
    // Check the INode for /TestSnapshot/sub1/file1 again
    names = INode.getPathNames(file1.toString());
    components = INode.getPathComponents(names);
-    INodesInPath newNodesInPath = fsdir.rootDir
-        .getExistingPathINodes(components, components.length, false);
+    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
    assertSnapshot(newNodesInPath, false, s3, -1);
    INode[] newInodes = newNodesInPath.getINodes();
    // The number of inodes should be equal to components.length


@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;