HDFS-4151. Merge r1406006 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1471567 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2013-04-24 18:01:46 +00:00
parent 1fb58d2322
commit c68bb9cb51
5 changed files with 130 additions and 149 deletions

CHANGES.txt

@@ -56,6 +56,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
     returning more than INode array. (Jing Zhao via suresh)
 
+    HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead
+    of INode[] as a parameter. (szetszwo)
+
     HDFS-4129. Add utility methods to dump NameNode in memory tree for
     testing. (szetszwo via suresh)
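
The HDFS-4151 entry above is the theme of the whole commit: FSDirectory helpers stop taking a raw INode[] and instead accept the INodesInPath wrapper defined in INodeDirectory.java (the last file in this diff). Below is a minimal, illustrative sketch of the before/after call shape, using simplified stand-ins rather than the real HDFS classes:

    // Simplified stand-ins (not the HDFS classes) showing the
    // INode[] -> INodesInPath parameter change described by HDFS-4151.
    class INode {}

    class INodesInPath {
      private final INode[] inodes;

      INodesInPath(int number) {
        assert number >= 0;
        this.inodes = new INode[number];
      }

      INode[] getINodes() {
        return inodes;
      }

      void setINode(int i, INode inode) {
        inodes[i] = inode;
      }
    }

    class Example {
      // Before: helpers received the resolved path as a bare array.
      static INode lastBefore(INode[] inodes) {
        return inodes[inodes.length - 1];
      }

      // After: helpers receive the wrapper and unpack the array themselves.
      static INode lastAfter(INodesInPath inodesInPath) {
        final INode[] inodes = inodesInPath.getINodes();
        return inodes[inodes.length - 1];
      }

      public static void main(String[] args) {
        INodesInPath path = new INodesInPath(2);
        path.setINode(0, new INode());
        path.setINode(1, new INode());
        System.out.println(lastAfter(path) == path.getINodes()[1]); // true
      }
    }

Keeping the resolved path inside one wrapper means later changes can attach more per-path state without touching every method signature again.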

FSDirectory.java

@@ -310,22 +310,18 @@ INode unprotectedAddFile( String path,
   /**
    * Add a block to the file. Returns a reference to the added block.
    */
-  BlockInfo addBlock(String path,
-                     INode[] inodes,
-                     Block block,
-                     DatanodeDescriptor targets[]
-  ) throws QuotaExceededException {
+  BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
+      DatanodeDescriptor targets[]) throws IOException {
     waitForReady();
     writeLock();
     try {
-      assert inodes[inodes.length-1].isUnderConstruction() :
-        "INode should correspond to a file under construction";
-      INodeFileUnderConstruction fileINode =
-          (INodeFileUnderConstruction)inodes[inodes.length-1];
+      final INode[] inodes = inodesInPath.getINodes();
+      final INodeFileUnderConstruction fileINode =
+          INodeFileUnderConstruction.valueOf(inodes[inodes.length-1], path);
       // check quota limits and updated space consumed
-      updateCount(inodes, inodes.length-1, 0,
+      updateCount(inodesInPath, inodes.length-1, 0,
           fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
       // associate new last block for the file
@@ -418,8 +414,9 @@ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
     }
     // update space consumed
-    INode[] pathINodes = getExistingPathINodes(path);
-    updateCount(pathINodes, pathINodes.length-1, 0,
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, true);
+    final INode[] inodes = inodesInPath.getINodes();
+    updateCount(inodesInPath, inodes.length-1, 0,
         -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
@@ -487,7 +484,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
       throws QuotaExceededException, UnresolvedLinkException,
       FileAlreadyExistsException {
     assert hasWriteLock();
-    INode[] srcInodes = rootDir.getExistingPathINodes(src, false);
+    INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false);
+    INode[] srcInodes = srcInodesInPath.getINodes();
     INode srcInode = srcInodes[srcInodes.length-1];
     // check the validation of the source
@@ -550,7 +548,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
     String srcChildName = null;
     try {
       // remove src
-      srcChild = removeChild(srcInodes, srcInodes.length-1);
+      srcChild = removeChild(srcInodesInPath, srcInodes.length-1);
       if (srcChild == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
             + "failed to rename " + src + " to " + dst
@@ -561,7 +559,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
       srcChild.setLocalName(dstComponents[dstInodes.length-1]);
       // add src to the destination
-      dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
+      dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length-1,
           srcChild, UNKNOWN_DISK_SPACE);
       if (dstChild != null) {
         srcChild = null;
@@ -580,7 +578,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
       if (dstChild == null && srcChild != null) {
         // put it back
         srcChild.setLocalName(srcChildName);
-        addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild,
+        addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, srcChild,
             UNKNOWN_DISK_SPACE);
       }
     }
@@ -613,7 +611,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
       }
     }
     String error = null;
-    final INode[] srcInodes = rootDir.getExistingPathINodes(src, false);
+    final INodesInPath srcInodesInPath = rootDir.getExistingPathINodes(src, false);
+    final INode[] srcInodes = srcInodesInPath.getINodes();
     final INode srcInode = srcInodes[srcInodes.length - 1];
     // validate source
     if (srcInode == null) {
@@ -700,7 +699,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
     // Ensure dst has quota to accommodate rename
     verifyQuotaForRename(srcInodes, dstInodes);
-    INode removedSrc = removeChild(srcInodes, srcInodes.length - 1);
+    INode removedSrc = removeChild(srcInodesInPath, srcInodes.length - 1);
     if (removedSrc == null) {
       error = "Failed to rename " + src + " to " + dst
           + " because the source can not be removed";
@@ -713,14 +712,14 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
     INode removedDst = null;
     try {
       if (dstInode != null) { // dst exists remove it
-        removedDst = removeChild(dstInodes, dstInodes.length - 1);
+        removedDst = removeChild(dstInodesInPath, dstInodes.length - 1);
         dstChildName = removedDst.getLocalName();
       }
       INode dstChild = null;
       removedSrc.setLocalName(dstComponents[dstInodes.length - 1]);
       // add src as dst to complete rename
-      dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
+      dstChild = addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1,
           removedSrc, UNKNOWN_DISK_SPACE);
       int filesDeleted = 0;
@@ -750,13 +749,13 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
       if (removedSrc != null) {
         // Rename failed - restore src
         removedSrc.setLocalName(srcChildName);
-        addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, removedSrc,
+        addChildNoQuotaCheck(srcInodesInPath, srcInodes.length - 1, removedSrc,
            UNKNOWN_DISK_SPACE);
       }
       if (removedDst != null) {
         // Rename failed - restore dst
         removedDst.setLocalName(dstChildName);
-        addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, removedDst,
+        addChildNoQuotaCheck(dstInodesInPath, dstInodes.length - 1, removedDst,
            UNKNOWN_DISK_SPACE);
       }
     }
@@ -796,7 +795,8 @@ Block[] unprotectedSetReplication(String src,
       UnresolvedLinkException {
     assert hasWriteLock();
-    INode[] inodes = rootDir.getExistingPathINodes(src, true);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true);
+    final INode[] inodes = inodesInPath.getINodes();
     INode inode = inodes[inodes.length - 1];
     if (inode == null || !inode.isFile()) {
       return null;
@@ -806,7 +806,7 @@ Block[] unprotectedSetReplication(String src,
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
-    updateCount(inodes, inodes.length-1, 0, dsDelta, true);
+    updateCount(inodesInPath, inodes.length-1, 0, dsDelta, true);
     fileNode.setReplication(replication);
@@ -927,7 +927,8 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp)
     }
     // do the move
-    INode [] trgINodes = getExistingPathINodes(target);
+    final INodesInPath trgINodesInPath = rootDir.getExistingPathINodes(target, true);
+    final INode[] trgINodes = trgINodesInPath.getINodes();
     INodeFile trgInode = (INodeFile) trgINodes[trgINodes.length-1];
     INodeDirectory trgParent = (INodeDirectory)trgINodes[trgINodes.length-2];
@@ -954,7 +955,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp)
     trgInode.setModificationTimeForce(timestamp);
     trgParent.setModificationTime(timestamp);
     // update quota on the parent directory ('count' files removed, 0 space)
-    unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0);
+    unprotectedUpdateCount(trgINodesInPath, trgINodes.length-1, -count, 0);
   }

   /**
@@ -1037,7 +1038,8 @@ int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks,
     assert hasWriteLock();
     src = normalizePath(src);
-    INode[] inodes = rootDir.getExistingPathINodes(src, false);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, false);
+    final INode[] inodes = inodesInPath.getINodes();
     INode targetNode = inodes[inodes.length-1];
     if (targetNode == null) { // non-existent src
@@ -1055,7 +1057,7 @@ int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks,
     }
     int pos = inodes.length - 1;
     // Remove the node from the namespace
-    targetNode = removeChild(inodes, pos);
+    targetNode = removeChild(inodesInPath, pos);
     if (targetNode == null) {
       return 0;
     }
@@ -1190,28 +1192,6 @@ INode getINode(String src) throws UnresolvedLinkException {
       readUnlock();
     }
   }
-
-  /**
-   * Retrieve the existing INodes along the given path.
-   *
-   * @param path the path to explore
-   * @return INodes array containing the existing INodes in the order they
-   *         appear when following the path from the root INode to the
-   *         deepest INodes. The array size will be the number of expected
-   *         components in the path, and non existing components will be
-   *         filled with null
-   *
-   * @see INodeDirectory#getExistingPathINodes(byte[][], INode[])
-   */
-  INode[] getExistingPathINodes(String path)
-    throws UnresolvedLinkException {
-    readLock();
-    try {
-      return rootDir.getExistingPathINodes(path, true);
-    } finally {
-      readUnlock();
-    }
-  }

   /**
    * Get the parent node of path.
@@ -1277,13 +1257,14 @@ void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
       UnresolvedLinkException {
     writeLock();
     try {
-      INode[] inodes = rootDir.getExistingPathINodes(path, false);
+      final INodesInPath inodesInPath = rootDir.getExistingPathINodes(path, false);
+      final INode[] inodes = inodesInPath.getINodes();
       int len = inodes.length;
       if (inodes[len - 1] == null) {
         throw new FileNotFoundException(path +
             " does not exist under rootDir.");
       }
-      updateCount(inodes, len-1, nsDelta, dsDelta, true);
+      updateCount(inodesInPath, len-1, nsDelta, dsDelta, true);
     } finally {
       writeUnlock();
     }
@@ -1298,7 +1279,7 @@ void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
    * @param checkQuota if true then check if quota is exceeded
    * @throws QuotaExceededException if the new count violates any quota limit
    */
-  private void updateCount(INode[] inodes, int numOfINodes,
+  private void updateCount(INodesInPath inodesInPath, int numOfINodes,
       long nsDelta, long dsDelta, boolean checkQuota)
       throws QuotaExceededException {
     assert hasWriteLock();
@@ -1306,29 +1287,25 @@ private void updateCount(INode[] inodes, int numOfINodes,
       //still initializing. do not check or update quotas.
       return;
     }
-    if (numOfINodes>inodes.length) {
+    final INode[] inodes = inodesInPath.getINodes();
+    if (numOfINodes > inodes.length) {
       numOfINodes = inodes.length;
     }
     if (checkQuota) {
       verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
     }
-    for(int i = 0; i < numOfINodes; i++) {
-      if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
-        node.addSpaceConsumed(nsDelta, dsDelta);
-      }
-    }
+    unprotectedUpdateCount(inodesInPath, numOfINodes, nsDelta, dsDelta);
   }

   /**
    * update quota of each inode and check to see if quota is exceeded.
    * See {@link #updateCount(INode[], int, long, long, boolean)}
    */
-  private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes,
-      long nsDelta, long dsDelta) {
+  private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
+      int numOfINodes, long nsDelta, long dsDelta) {
     assert hasWriteLock();
     try {
-      updateCount(inodes, numOfINodes, nsDelta, dsDelta, false);
+      updateCount(inodesInPath, numOfINodes, nsDelta, dsDelta, false);
     } catch (QuotaExceededException e) {
       NameNode.LOG.warn("FSDirectory.updateCountNoQuotaCheck - unexpected ", e);
     }
@@ -1342,9 +1319,10 @@ private void updateCountNoQuotaCheck(INode[] inodes, int numOfINodes,
    * @param nsDelta
    * @param dsDelta
    */
-  void unprotectedUpdateCount(INode[] inodes, int numOfINodes,
-      long nsDelta, long dsDelta) {
+  private void unprotectedUpdateCount(INodesInPath inodesInPath,
+      int numOfINodes, long nsDelta, long dsDelta) {
     assert hasWriteLock();
+    final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
         INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
@@ -1426,7 +1404,7 @@ boolean mkdirs(String src, PermissionStatus permissions,
     StringBuilder pathbuilder = new StringBuilder();
     int i = 1;
     for(; i < inodes.length && inodes[i] != null; i++) {
-      pathbuilder.append(Path.SEPARATOR + names[i]);
+      pathbuilder.append(Path.SEPARATOR).append(names[i]);
       if (!inodes[i].isDirectory()) {
         throw new FileAlreadyExistsException("Parent path is not a directory: "
             + pathbuilder+ " "+inodes[i].getLocalName());
@@ -1468,8 +1446,7 @@ boolean mkdirs(String src, PermissionStatus permissions,
     // create directories beginning from the first null index
     for(; i < inodes.length; i++) {
       pathbuilder.append(Path.SEPARATOR + names[i]);
-      String cur = pathbuilder.toString();
-      unprotectedMkdir(inodes, i, components[i],
+      unprotectedMkdir(inodesInPath, i, components[i],
           (i < lastInodeIndex) ? parentPermissions : permissions, now);
       if (inodes[i] == null) {
         return false;
@@ -1478,6 +1455,8 @@ boolean mkdirs(String src, PermissionStatus permissions,
       // to match count of FilesDeleted metric.
       if (getFSNamesystem() != null)
         NameNode.getNameNodeMetrics().incrFilesCreated();
+
+      final String cur = pathbuilder.toString();
       fsImage.getEditLog().logMkDir(cur, inodes[i]);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
@@ -1498,30 +1477,30 @@ INode unprotectedMkdir(String src, PermissionStatus permissions,
     INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
         components.length, false);
     INode[] inodes = inodesInPath.getINodes();
-    unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1],
-        permissions, timestamp);
-    return inodes[inodes.length-1];
+    final int pos = inodes.length - 1;
+    unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
+    return inodes[pos];
   }

   /** create a directory at index pos.
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(INode[] inodes, int pos,
+  private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
       byte[] name, PermissionStatus permission,
       long timestamp) throws QuotaExceededException {
     assert hasWriteLock();
-    inodes[pos] = addChild(inodes, pos,
-        new INodeDirectory(name, permission, timestamp),
-        -1);
+    final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
+    final INode inode = addChild(inodesInPath, pos, dir, -1, true);
+    inodesInPath.setINode(pos, inode);
   }

   /** Add a node child to the namespace. The full path name of the node is src.
    * childDiskspace should be -1, if unknown.
-   * QuotaExceededException is thrown if it violates quota limit */
-  private <T extends INode> T addNode(String src, T child,
-      long childDiskspace)
-  throws QuotaExceededException, UnresolvedLinkException {
+   * @throw QuotaExceededException is thrown if it violates quota limit
+   */
+  private <T extends INode> T addNode(String src, T child, long childDiskspace
+      ) throws QuotaExceededException, UnresolvedLinkException {
     byte[][] components = INode.getPathComponents(src);
     byte[] path = components[components.length-1];
     child.setLocalName(path);
@@ -1530,8 +1509,8 @@ private <T extends INode> T addNode(String src, T child,
     try {
       INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
           components.length, false);
-      INode[] inodes = inodesInPath.getINodes();
-      return addChild(inodes, inodes.length-1, child, childDiskspace);
+      return addChild(inodesInPath, inodesInPath.getINodes().length-1, child,
+          childDiskspace, true);
     } finally {
       writeUnlock();
     }
@@ -1656,19 +1635,22 @@ protected <T extends INode> void verifyFsLimits(INode[] pathComponents,
   }

   /** Add a node child to the inodes at index pos.
    * Its ancestors are stored at [0, pos-1].
-   * QuotaExceededException is thrown if it violates quota limit */
-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
+   * @return the added node.
+   * @throw QuotaExceededException is thrown if it violates quota limit
+   */
+  private <T extends INode> T addChild(INodesInPath inodesInPath, int pos,
       T child, long childDiskspace,
       boolean checkQuota) throws QuotaExceededException {
+    final INode[] inodes = inodesInPath.getINodes();
     // The filesystem limits are not really quotas, so this check may appear
     // odd. It's because a rename operation deletes the src, tries to add
     // to the dest, if that fails, re-adds the src from whence it came.
     // The rename code disables the quota when it's restoring to the
     // original location becase a quota violation would cause the the item
     // to go "poof". The fs limits must be bypassed for the same reason.
     if (checkQuota) {
-      verifyFsLimits(pathComponents, pos, child);
+      verifyFsLimits(inodes, pos, child);
     }

     INode.DirCounts counts = new INode.DirCounts();
@@ -1676,31 +1658,22 @@ private <T extends INode> T addChild(INode[] pathComponents, int pos,
     if (childDiskspace < 0) {
       childDiskspace = counts.getDsCount();
     }
-    updateCount(pathComponents, pos, counts.getNsCount(), childDiskspace,
-        checkQuota);
-    if (pathComponents[pos-1] == null) {
+    updateCount(inodesInPath, pos, counts.getNsCount(), childDiskspace, checkQuota);
+    if (inodes[pos-1] == null) {
       throw new NullPointerException("Panic: parent does not exist");
     }
-    T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
-        child, true);
+    final T addedNode = ((INodeDirectory)inodes[pos-1]).addChild(child, true);
     if (addedNode == null) {
-      updateCount(pathComponents, pos, -counts.getNsCount(),
-          -childDiskspace, true);
+      updateCount(inodesInPath, pos, -counts.getNsCount(), -childDiskspace, true);
     }
     return addedNode;
   }

-  private <T extends INode> T addChild(INode[] pathComponents, int pos,
-      T child, long childDiskspace)
-      throws QuotaExceededException {
-    return addChild(pathComponents, pos, child, childDiskspace, true);
-  }
-
-  private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
+  private <T extends INode> T addChildNoQuotaCheck(INodesInPath inodesInPath,
       int pos, T child, long childDiskspace) {
     T inode = null;
     try {
-      inode = addChild(pathComponents, pos, child, childDiskspace, false);
+      inode = addChild(inodesInPath, pos, child, childDiskspace, false);
     } catch (QuotaExceededException e) {
       NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e);
     }
@@ -1712,13 +1685,13 @@ private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
    * Count of each ancestor with quota is also updated.
    * Return the removed node; null if the removal fails.
    */
-  private INode removeChild(INode[] pathComponents, int pos) {
-    INode removedNode =
-      ((INodeDirectory)pathComponents[pos-1]).removeChild(pathComponents[pos]);
+  private INode removeChild(final INodesInPath inodesInPath, int pos) {
+    final INode[] inodes = inodesInPath.getINodes();
+    INode removedNode = ((INodeDirectory)inodes[pos-1]).removeChild(inodes[pos]);
     if (removedNode != null) {
       INode.DirCounts counts = new INode.DirCounts();
       removedNode.spaceConsumedInTree(counts);
-      updateCountNoQuotaCheck(pathComponents, pos,
+      updateCountNoQuotaCheck(inodesInPath, pos,
           -counts.getNsCount(), -counts.getDsCount());
     }
     return removedNode;
@@ -1853,7 +1826,8 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
     String srcs = normalizePath(src);
-    INode[] inodes = rootDir.getExistingPathINodes(src, true);
+    final INodesInPath inodesInPath = rootDir.getExistingPathINodes(src, true);
+    final INode[] inodes = inodesInPath.getINodes();
     INode targetNode = inodes[inodes.length-1];
     if (targetNode == null) {
       throw new FileNotFoundException("Directory does not exist: " + srcs);
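
The updateCount/unprotectedUpdateCount hunks above all funnel into the same bookkeeping: walk the first numOfINodes entries of the resolved path and charge the namespace/diskspace delta to every ancestor directory that has a quota set. A self-contained sketch of that walk, with stand-in types rather than the HDFS INode hierarchy:

    // Sketch of the ancestor walk performed by unprotectedUpdateCount:
    // only the first numOfINodes path entries are visited, and only those
    // with a quota accumulate the deltas.
    class QuotaSketch {
      static class Node {
        boolean quotaSet;
        long nsConsumed, dsConsumed;

        void addSpaceConsumed(long nsDelta, long dsDelta) {
          nsConsumed += nsDelta;
          dsConsumed += dsDelta;
        }
      }

      static void updateAncestors(Node[] inodes, int numOfINodes,
          long nsDelta, long dsDelta) {
        for (int i = 0; i < numOfINodes; i++) {
          if (inodes[i].quotaSet) {            // a directory with quota
            inodes[i].addSpaceConsumed(nsDelta, dsDelta);
          }
        }
      }

      public static void main(String[] args) {
        Node root = new Node();
        root.quotaSet = true;                  // "/" carries a quota
        Node dir = new Node();                 // plain directory, no quota
        Node file = new Node();
        // Charge one namespace object and 1024 bytes to the file's ancestors.
        updateAncestors(new Node[] { root, dir, file }, 2, 1, 1024);
        System.out.println(root.nsConsumed + " " + root.dsConsumed); // 1 1024
      }
    }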

FSNamesystem.java

@@ -167,6 +167,7 @@
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
@@ -1688,7 +1689,7 @@ long getPreferredBlockSize(String filename)
     }
   }

-  /*
+  /**
    * Verify that parent directory of src exists.
    */
   private void verifyParentDir(String src) throws FileNotFoundException,
@@ -1696,14 +1697,13 @@ private void verifyParentDir(String src) throws FileNotFoundException,
     assert hasReadOrWriteLock();
     Path parent = new Path(src).getParent();
     if (parent != null) {
-      INode[] pathINodes = dir.getExistingPathINodes(parent.toString());
-      INode parentNode = pathINodes[pathINodes.length - 1];
+      final INode parentNode = dir.getINode(parent.toString());
       if (parentNode == null) {
         throw new FileNotFoundException("Parent directory doesn't exist: "
-            + parent.toString());
+            + parent);
       } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
         throw new ParentNotDirectoryException("Parent path is not a directory: "
-            + parent.toString());
+            + parent);
       }
     }
   }
@@ -2149,7 +2149,7 @@ LocatedBlock getAdditionalBlock(String src,
       checkOperation(OperationCategory.READ);
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
       final INode[] inodes = analyzeFileState(
-          src, clientName, previous, onRetryBlock);
+          src, clientName, previous, onRetryBlock).getINodes();
       final INodeFileUnderConstruction pendingFile =
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
@@ -2180,8 +2180,9 @@ LocatedBlock getAdditionalBlock(String src,
       // Run the full analysis again, since things could have changed
       // while chooseTarget() was executing.
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-      INode[] inodes =
+      INodesInPath inodesInPath =
           analyzeFileState(src, clientName, previous, onRetryBlock);
+      final INode[] inodes = inodesInPath.getINodes();
       final INodeFileUnderConstruction pendingFile =
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
@@ -2196,7 +2197,7 @@ LocatedBlock getAdditionalBlock(String src,
       // allocate new block, record block locations in INode.
       newBlock = createNewBlock();
-      saveAllocatedBlock(src, inodes, newBlock, targets);
+      saveAllocatedBlock(src, inodesInPath, newBlock, targets);
       dir.persistBlocks(src, pendingFile);
       offset = pendingFile.computeFileSize(true);
@@ -2211,7 +2212,7 @@ LocatedBlock getAdditionalBlock(String src,
     return makeLocatedBlock(newBlock, targets, offset);
   }

-  INode[] analyzeFileState(String src,
+  INodesInPath analyzeFileState(String src,
                            String clientName,
                            ExtendedBlock previous,
                            LocatedBlock[] onRetryBlock)
@@ -2229,7 +2230,8 @@ INode[] analyzeFileState(String src,
     checkFsObjectLimit();

     Block previousBlock = ExtendedBlock.getLocalBlock(previous);
-    final INode[] inodes = dir.rootDir.getExistingPathINodes(src, true);
+    final INodesInPath inodesInPath = dir.rootDir.getExistingPathINodes(src, true);
+    final INode[] inodes = inodesInPath.getINodes();
     final INodeFileUnderConstruction pendingFile
         = checkLease(src, clientName, inodes[inodes.length - 1]);
     BlockInfo lastBlockInFile = pendingFile.getLastBlock();
@@ -2289,7 +2291,7 @@ INode[] analyzeFileState(String src,
         onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
             ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
             offset);
-        return inodes;
+        return inodesInPath;
       } else {
         // Case 3
         throw new IOException("Cannot allocate block in " + src + ": " +
@@ -2302,7 +2304,7 @@ INode[] analyzeFileState(String src,
     if (!checkFileProgress(pendingFile, false)) {
       throw new NotReplicatedYetException("Not replicated yet: " + src);
     }
-    return inodes;
+    return inodesInPath;
   }

   LocatedBlock makeLocatedBlock(Block blk,
@@ -2512,7 +2514,7 @@ private boolean completeFileInternal(String src,
    * The last INode is the INode for the file.
    * @throws QuotaExceededException If addition of block exceeds space quota
    */
-  BlockInfo saveAllocatedBlock(String src, INode[] inodes,
+  BlockInfo saveAllocatedBlock(String src, INodesInPath inodes,
       Block newBlock, DatanodeDescriptor targets[]) throws IOException {
     assert hasWriteLock();
     BlockInfo b = dir.addBlock(src, inodes, newBlock, targets);

FSPermissionChecker.java

@@ -132,7 +132,7 @@ void checkPermission(String path, INodeDirectory root, boolean doCheckOwner,
     }
     // check if (parentAccess != null) && file exists, then check sb
     // Resolve symlinks, the check is performed on the link target.
-    INode[] inodes = root.getExistingPathINodes(path, true);
+    final INode[] inodes = root.getExistingPathINodes(path, true).getINodes();
     int ancestorIndex = inodes.length - 2;
     for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
         ancestorIndex--);

INodeDirectory.java

@@ -245,14 +245,12 @@ INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
    *         components in the path, and non existing components will be
    *         filled with null
    *
-   * @see #getExistingPathINodes(byte[][], INode[])
+   * @see #getExistingPathINodes(byte[][], int, boolean)
    */
-  INode[] getExistingPathINodes(String path, boolean resolveLink)
+  INodesInPath getExistingPathINodes(String path, boolean resolveLink)
     throws UnresolvedLinkException {
     byte[][] components = getPathComponents(path);
-    INodesInPath inodes = this.getExistingPathINodes(components,
-        components.length, resolveLink);
-    return inodes.inodes;
+    return getExistingPathINodes(components, components.length, resolveLink);
   }

   /**
@@ -419,6 +417,28 @@ int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
     children = null;
     return total;
   }
+
+  /**
+   * Used by
+   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
+   * Containing INodes information resolved from a given path.
+   */
+  static class INodesInPath {
+    private INode[] inodes;
+
+    public INodesInPath(int number) {
+      assert (number >= 0);
+      this.inodes = new INode[number];
+    }
+
+    INode[] getINodes() {
+      return inodes;
+    }
+
+    void setINode(int i, INode inode) {
+      inodes[i] = inode;
+    }
+  }

   /*
    * The following code is to dump the tree recursively for testing.
@@ -467,22 +487,4 @@ protected static void dumpTreeRecursively(PrintWriter out,
     }
     prefix.setLength(prefix.length() - 2);
   }
-
-  /**
-   * Used by
-   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
-   * Containing INodes information resolved from a given path.
-   */
-  static class INodesInPath {
-    private INode[] inodes;
-
-    public INodesInPath(int number) {
-      assert (number >= 0);
-      this.inodes = new INode[number];
-    }
-
-    INode[] getINodes() {
-      return inodes;
-    }
-  }
 }
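
For context on the setINode(int, INode) method that the relocated INodesInPath class gains above: unprotectedMkdir in the FSDirectory.java hunks now writes the directory it just created back into the resolved-path slot, so the mkdirs() loop sees each new ancestor on the next iteration. A rough sketch of that write-back, reusing the simplified INode/INodesInPath stand-ins from the sketch after the CHANGES.txt hunk (not the committed code):

    class MkdirWriteBackSketch {
      // Stand-in for the unprotectedMkdir(inodesInPath, pos, ...) pattern:
      // create the child, then record it at position pos of the resolved path.
      static void mkdirAt(INodesInPath inodesInPath, int pos) {
        final INode newDir = new INode();  // real code: addChild(inodesInPath, pos, dir, -1, true)
        inodesInPath.setINode(pos, newDir);
      }

      public static void main(String[] args) {
        INodesInPath path = new INodesInPath(3);
        path.setINode(0, new INode());     // "/" already exists
        for (int i = 1; i < path.getINodes().length; i++) {
          if (path.getINodes()[i] == null) {
            mkdirAt(path, i);              // create missing ancestors in order
          }
        }
        System.out.println(path.getINodes()[2] != null); // true
      }
    }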