HDFS-4124. Merge r1403304 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1471500 13f79535-47bb-0310-9956-ffa450edef68
commit 40aaaf000e
parent 611b3fc5e6
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -53,6 +53,13 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-3817. Avoid printing SafeModeException stack trace.
     (Brandon Li via suresh)
 
+    HDFS-4112. A few improvements on INodeDirectory include adding a utility
+    method for casting; avoiding creation of new empty lists; cleaning up
+    some code and rewriting some javadoc. (szetszwo)
+
+    HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
+    returning more than INode array. (Jing Zhao via suresh)
+
     HDFS-4129. Add utility methods to dump NameNode in memory tree for
     testing. (szetszwo via suresh)
 
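The HDFS-4124 entry is the change the rest of this commit implements: getExistingPathINodes() previously filled a caller-supplied INode[] and returned a count of existing INodes, and now returns a small INodesInPath wrapper so that more than a bare INode array can be handed back in the future. A rough before/after sketch of the caller side (identifiers taken from the FSDirectory hunks below; an illustrative fragment, not a compilable unit on its own):

    // Before: the caller pre-allocates the array and the method fills it.
    byte[][] components = INode.getPathComponents(dst);
    INode[] inodes = new INode[components.length];
    rootDir.getExistingPathINodes(components, inodes, false);

    // After: the method sizes and returns the result itself, wrapped in
    // INodesInPath, which can grow extra fields without changing callers.
    INodesInPath inodesInPath =
        rootDir.getExistingPathINodes(components, components.length, false);
    INode[] resolved = inodesInPath.getINodes();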
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -551,8 +552,9 @@ public class FSDirectory implements Closeable {
     }
 
     byte[][] dstComponents = INode.getPathComponents(dst);
-    INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    INode[] dstInodes = dstInodesInPath.getINodes();
     if (dstInodes[dstInodes.length-1] != null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
                                    +"failed to rename "+src+" to "+dst+
@@ -567,7 +569,7 @@ public class FSDirectory implements Closeable {
     }
 
     // Ensure dst has quota to accommodate rename
-    verifyQuotaForRename(srcInodes,dstInodes);
+    verifyQuotaForRename(srcInodes, dstInodes);
 
     INode dstChild = null;
     INode srcChild = null;
@@ -673,8 +675,9 @@ public class FSDirectory implements Closeable {
       throw new IOException(error);
     }
     final byte[][] dstComponents = INode.getPathComponents(dst);
-    final INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    final INode[] dstInodes = dstInodesInPath.getINodes();
     INode dstInode = dstInodes[dstInodes.length - 1];
     if (dstInodes.length == 1) {
       error = "rename destination cannot be the root";
@@ -1455,12 +1458,13 @@ public class FSDirectory implements Closeable {
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
-    INode[] inodes = new INode[components.length];
-    final int lastInodeIndex = inodes.length - 1;
+    final int lastInodeIndex = components.length - 1;
 
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      INode[] inodes = inodesInPath.getINodes();
 
       // find the index of the first null in inodes[]
       StringBuilder pathbuilder = new StringBuilder();
@@ -1530,16 +1534,14 @@ public class FSDirectory implements Closeable {
     return true;
   }
 
   /**
   */
   INode unprotectedMkdir(String src, PermissionStatus permissions,
                           long timestamp) throws QuotaExceededException,
                           UnresolvedLinkException {
     assert hasWriteLock();
     byte[][] components = INode.getPathComponents(src);
-    INode[] inodes = new INode[components.length];
-
-    rootDir.getExistingPathINodes(components, inodes, false);
+    INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = inodesInPath.getINodes();
     unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1],
         permissions, timestamp);
     return inodes[inodes.length-1];
@@ -1568,10 +1570,11 @@ public class FSDirectory implements Closeable {
     byte[] path = components[components.length-1];
     child.setLocalName(path);
     cacheName(child);
-    INode[] inodes = new INode[components.length];
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      INode[] inodes = inodesInPath.getINodes();
       return addChild(inodes, inodes.length-1, child, childDiskspace);
     } finally {
       writeUnlock();
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -131,9 +131,9 @@ class INodeDirectory extends INode {
    */
   private INode getNode(byte[][] components, boolean resolveLink
       ) throws UnresolvedLinkException {
-    INode[] inode = new INode[1];
-    getExistingPathINodes(components, inode, resolveLink);
-    return inode[0];
+    INodesInPath inodesInPath = getExistingPathINodes(components, 1,
+        resolveLink);
+    return inodesInPath.inodes[0];
   }
 
   /**
@@ -181,27 +181,29 @@ class INodeDirectory extends INode {
    * fill the array with [rootINode,c1,c2,null]
    * 
    * @param components array of path component name
-   * @param existing array to fill with existing INodes
+   * @param numOfINodes number of INodes to return
    * @param resolveLink indicates whether UnresolvedLinkException should
    *        be thrown when the path refers to a symbolic link.
-   * @return number of existing INodes in the path
+   * @return the specified number of existing INodes in the path
    */
-  int getExistingPathINodes(byte[][] components, INode[] existing,
-      boolean resolveLink) throws UnresolvedLinkException {
+  INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
+      boolean resolveLink)
+      throws UnresolvedLinkException {
     assert this.compareTo(components[0]) == 0 :
         "Incorrect name " + getLocalName() + " expected "
         + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
 
+    INodesInPath existing = new INodesInPath(numOfINodes);
     INode curNode = this;
     int count = 0;
-    int index = existing.length - components.length;
+    int index = numOfINodes - components.length;
     if (index > 0) {
       index = 0;
     }
     while (count < components.length && curNode != null) {
       final boolean lastComp = (count == components.length - 1);
       if (index >= 0) {
-        existing[index] = curNode;
+        existing.inodes[index] = curNode;
       }
       if (curNode.isSymlink() && (!lastComp || (lastComp && resolveLink))) {
         final String path = constructPath(components, 0, components.length);
@@ -226,7 +228,7 @@ class INodeDirectory extends INode {
       count++;
       index++;
     }
-    return count;
+    return existing;
   }
 
   /**
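The slot arithmetic above is unchanged by the refactoring: index starts at numOfINodes - components.length (clamped to at most 0), so when fewer slots are requested than the path has components, only the last numOfINodes resolved INodes are kept. Below is a minimal, self-contained model of just that arithmetic; strings stand in for INodes, every component is assumed to exist, and the class name is hypothetical (the real method also handles symlinks and stops at the first missing component):

    // INodesInPathDemo.java: hypothetical demo class, not part of Hadoop.
    import java.util.Arrays;

    public class INodesInPathDemo {
      // Mirrors the index bookkeeping of getExistingPathINodes(byte[][], int, boolean).
      static String[] resolve(String[] components, int numOfINodes) {
        String[] inodes = new String[numOfINodes];
        int index = numOfINodes - components.length;
        if (index > 0) {
          index = 0;  // never skip trailing components
        }
        int count = 0;
        while (count < components.length) {
          if (index >= 0) {
            inodes[index] = components[count];  // keep only the last numOfINodes
          }
          count++;
          index++;
        }
        return inodes;
      }

      public static void main(String[] args) {
        String[] path = {"", "a", "b", "c"};  // "" is the root component
        System.out.println(Arrays.toString(resolve(path, 4)));  // prints [, a, b, c]
        System.out.println(Arrays.toString(resolve(path, 1)));  // prints [c]
      }
    }

With numOfINodes == 1, as in getNode() above, the single slot ends up holding the last path component's INode, which is exactly the old new INode[1] idiom this commit replaces.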
@@ -247,11 +249,9 @@ class INodeDirectory extends INode {
   INode[] getExistingPathINodes(String path, boolean resolveLink)
       throws UnresolvedLinkException {
     byte[][] components = getPathComponents(path);
-    INode[] inodes = new INode[components.length];
-
-    this.getExistingPathINodes(components, inodes, resolveLink);
-
-    return inodes;
+    INodesInPath inodes = this.getExistingPathINodes(components,
+        components.length, resolveLink);
+    return inodes.inodes;
   }
 
   /**
@@ -342,9 +342,8 @@ class INodeDirectory extends INode {
     if (pathComponents.length < 2) // add root
       return null;
     // Gets the parent INode
-    INode[] inodes = new INode[2];
-    getExistingPathINodes(pathComponents, inodes, false);
-    INode inode = inodes[0];
+    INodesInPath inodes = getExistingPathINodes(pathComponents, 2, false);
+    INode inode = inodes.inodes[0];
     if (inode == null) {
       throw new FileNotFoundException("Parent path does not exist: "+
           DFSUtil.byteArray2String(pathComponents));
@@ -492,4 +491,22 @@ class INodeDirectory extends INode {
     }
     prefix.setLength(prefix.length() - 2);
   }
+
+  /**
+   * Used by
+   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
+   * Containing INodes information resolved from a given path.
+   */
+  static class INodesInPath {
+    private INode[] inodes;
+
+    public INodesInPath(int number) {
+      assert (number >= 0);
+      this.inodes = new INode[number];
+    }
+
+    INode[] getINodes() {
+      return inodes;
+    }
+  }
 }
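At this stage INodesInPath is little more than a sized holder for an INode[], but naming the return type is the point of the refactoring: per the CHANGES.txt entry above, later changes can attach more resolution state to it without touching every caller again. The resulting usage pattern, as seen in the FSDirectory hunks (an illustrative fragment, not a compilable unit):

    INodesInPath inodesInPath =
        rootDir.getExistingPathINodes(components, components.length, false);
    INode[] inodes = inodesInPath.getINodes();
    INode last = inodes[inodes.length - 1];  // null if the full path does not exist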