HDFS-7462. Consolidate implementation of mkdirs() into a single class. Contributed by Haohui Mai.

Haohui Mai 2014-12-02 14:53:45 -08:00
parent 5067ac098b
commit f32af158ed
17 changed files with 346 additions and 320 deletions

CHANGES.txt

@ -156,6 +156,9 @@ Release 2.7.0 - UNRELEASED
    HDFS-7438. Consolidate the implementation of rename() into a single class.
    (wheat9)
+   HDFS-7462. Consolidate implementation of mkdirs() into a single class.
+   (wheat9)
    OPTIMIZATIONS
    BUG FIXES

FSDirMkdirOp.java (new file)

@ -0,0 +1,238 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.util.Time.now;
class FSDirMkdirOp {
static HdfsFileStatus mkdirs(
FSNamesystem fsn, String src, PermissionStatus permissions,
boolean createParent) throws IOException {
FSDirectory fsd = fsn.getFSDirectory();
final String srcArg = src;
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
}
if (!DFSUtil.isValidName(src)) {
throw new InvalidPathException(src);
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath
(src);
src = fsd.resolvePath(pc, src, pathComponents);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, src);
}
if (!isDirMutable(fsd, src)) {
if (fsd.isPermissionEnabled()) {
fsd.checkAncestorAccess(pc, src, FsAction.WRITE);
}
if (!createParent) {
fsd.verifyParentDir(src);
}
// validate that we have enough inodes. This is, at best, a
// heuristic because the mkdirs() operation might need to
// create multiple inodes.
fsn.checkFsObjectLimit();
if (!mkdirsRecursively(fsd, src, permissions, false, now())) {
throw new IOException("Failed to create directory: " + src);
}
}
return fsd.getAuditFileInfo(srcArg, false);
}
static INode unprotectedMkdir(
FSDirectory fsd, long inodeId, String src,
PermissionStatus permissions, List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, UnresolvedLinkException, AclException {
assert fsd.hasWriteLock();
byte[][] components = INode.getPathComponents(src);
INodesInPath iip = fsd.getExistingPathINodes(components);
INode[] inodes = iip.getINodes();
final int pos = inodes.length - 1;
unprotectedMkdir(fsd, inodeId, iip, pos, components[pos], permissions,
aclEntries, timestamp);
return inodes[pos];
}
/**
* Create a directory
* If ancestor directories do not exist, automatically create them.
* @param fsd FSDirectory
* @param src string representation of the path to the directory
* @param permissions the permission of the directory
* @param inheritPermission
* if the permission of the directory should inherit from its parent or not.
* u+wx is implicitly added to the automatically created directories,
* and to the given directory if inheritPermission is true
* @param now creation time
* @return true if the operation succeeds false otherwise
* @throws QuotaExceededException if directory creation violates
* any quota limit
* @throws UnresolvedLinkException if a symlink is encountered in src.
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
static boolean mkdirsRecursively(
FSDirectory fsd, String src, PermissionStatus permissions,
boolean inheritPermission, long now)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException,
AclException {
src = FSDirectory.normalizePath(src);
String[] names = INode.getPathNames(src);
byte[][] components = INode.getPathComponents(names);
final int lastInodeIndex = components.length - 1;
fsd.writeLock();
try {
INodesInPath iip = fsd.getExistingPathINodes(components);
if (iip.isSnapshot()) {
throw new SnapshotAccessControlException(
"Modification on RO snapshot is disallowed");
}
INode[] inodes = iip.getINodes();
// find the index of the first null in inodes[]
StringBuilder pathbuilder = new StringBuilder();
int i = 1;
for(; i < inodes.length && inodes[i] != null; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
if (!inodes[i].isDirectory()) {
throw new FileAlreadyExistsException(
"Parent path is not a directory: "
+ pathbuilder + " "+inodes[i].getLocalName());
}
}
// default to creating parent dirs with the given perms
PermissionStatus parentPermissions = permissions;
// if not inheriting and it's the last inode, there's no use in
// computing perms that won't be used
if (inheritPermission || (i < lastInodeIndex)) {
// if inheriting (ie. creating a file or symlink), use the parent dir,
// else the supplied permissions
// NOTE: the permissions of the auto-created directories violate posix
FsPermission parentFsPerm = inheritPermission
? inodes[i-1].getFsPermission() : permissions.getPermission();
// ensure that the permissions allow user write+execute
if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
parentFsPerm = new FsPermission(
parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
parentFsPerm.getGroupAction(),
parentFsPerm.getOtherAction()
);
}
if (!parentPermissions.getPermission().equals(parentFsPerm)) {
parentPermissions = new PermissionStatus(
parentPermissions.getUserName(),
parentPermissions.getGroupName(),
parentFsPerm
);
// when inheriting, use same perms for entire path
if (inheritPermission) permissions = parentPermissions;
}
}
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
unprotectedMkdir(fsd, fsd.allocateNewInodeId(), iip, i, components[i],
(i < lastInodeIndex) ? parentPermissions : permissions, null, now);
if (inodes[i] == null) {
return false;
}
// Directory creation also count towards FilesCreated
// to match count of FilesDeleted metric.
NameNode.getNameNodeMetrics().incrFilesCreated();
final String cur = pathbuilder.toString();
fsd.getEditLog().logMkDir(cur, inodes[i]);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"mkdirs: created directory " + cur);
}
}
} finally {
fsd.writeUnlock();
}
return true;
}
/**
* Check whether the path specifies a directory
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
private static boolean isDirMutable(
FSDirectory fsd, String src) throws UnresolvedLinkException,
SnapshotAccessControlException {
src = FSDirectory.normalizePath(src);
fsd.readLock();
try {
INode node = fsd.getINode4Write(src, false);
return node != null && node.isDirectory();
} finally {
fsd.readUnlock();
}
}
/** create a directory at index pos.
* The parent path to the directory is at [0, pos-1].
* All ancestors exist. Newly created one stored at index pos.
*/
private static void unprotectedMkdir(
FSDirectory fsd, long inodeId, INodesInPath inodesInPath, int pos,
byte[] name, PermissionStatus permission, List<AclEntry> aclEntries,
long timestamp)
throws QuotaExceededException, AclException {
assert fsd.hasWriteLock();
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp);
if (fsd.addChild(inodesInPath, pos, dir, true)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
inodesInPath.setINode(pos, dir);
}
}
}
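The subtlest behaviour in the new class is the permission handling in mkdirsRecursively(): implicitly created ancestor directories always receive at least u+wx, even when the supplied or inherited mode does not grant it, so the caller can keep descending into the directories it just created (the NOTE in the code points out that this deviates from POSIX). Below is a minimal standalone sketch of that adjustment using the same org.apache.hadoop.fs.permission types; the class and method names are illustrative only and are not part of the patch.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

// Illustrative sketch of the u+wx adjustment applied to auto-created
// ancestor directories in FSDirMkdirOp#mkdirsRecursively.
public class MkdirPermissionSketch {

  static FsPermission withUserWriteExecute(FsPermission perm) {
    // If the user bits already imply write+execute, the mode is kept as-is.
    if (perm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
      return perm;
    }
    // Otherwise add w and x to the user bits; group and other are untouched.
    return new FsPermission(
        perm.getUserAction().or(FsAction.WRITE_EXECUTE),
        perm.getGroupAction(),
        perm.getOtherAction());
  }

  public static void main(String[] args) {
    // r-x------ (0500) becomes rwx------ (0700) for the implicit parents.
    System.out.println(withUserWriteExecute(new FsPermission((short) 0500)));
  }
}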

FSDirectory.java

@ -39,9 +39,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
-import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@ -83,7 +82,6 @@ import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
@ -147,6 +145,7 @@ public class FSDirectory implements Closeable {
   private final boolean isPermissionEnabled;
   private final String fsOwnerShortUserName;
   private final String supergroup;
+  private final INodeId inodeId;
   private final FSEditLog editLog;
@ -194,6 +193,7 @@ public class FSDirectory implements Closeable {
   FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
     this.dirLock = new ReentrantReadWriteLock(true); // fair
+    this.inodeId = new INodeId();
     rootDir = createRoot(ns);
     inodeMap = INodeMap.newInstance(rootDir);
     this.isPermissionEnabled = conf.getBoolean(
@ -329,8 +329,7 @@ public class FSDirectory implements Closeable {
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
     long modTime = now();
-    INodeFile newNode = newINodeFile(namesystem.allocateNewInodeId(),
-        permissions, modTime, modTime, replication, preferredBlockSize);
+    INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime, modTime, replication, preferredBlockSize);
     newNode.toUnderConstruction(clientName, clientMachine);
     boolean added = false;
@ -929,22 +928,6 @@ public class FSDirectory implements Closeable {
       readUnlock();
     }
   }
/**
* Check whether the path specifies a directory
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
boolean isDirMutable(String src) throws UnresolvedLinkException,
SnapshotAccessControlException {
src = normalizePath(src);
readLock();
try {
INode node = getINode4Write(src, false);
return node != null && node.isDirectory();
} finally {
readUnlock();
}
}
   /** Updates namespace and diskspace consumed for all
    * directories until the parent directory of file represented by path.
@ -1081,38 +1064,6 @@
     return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
   }
INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, UnresolvedLinkException, AclException {
assert hasWriteLock();
byte[][] components = INode.getPathComponents(src);
INodesInPath iip = getExistingPathINodes(components);
INode[] inodes = iip.getINodes();
final int pos = inodes.length - 1;
unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, aclEntries,
timestamp);
return inodes[pos];
}
/** create a directory at index pos.
* The parent path to the directory is at [0, pos-1].
* All ancestors exist. Newly created one stored at index pos.
*/
void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
int pos, byte[] name, PermissionStatus permission,
List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, AclException {
assert hasWriteLock();
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp);
if (addChild(inodesInPath, pos, dir, true)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
inodesInPath.setINode(pos, dir);
}
}
   /**
    * Add the given child to the namespace.
    * @param src The full path name of the child node.
@ -1314,8 +1265,8 @@
    * otherwise return true;
    * @throws QuotaExceededException is thrown if it violates quota limit
    */
-  private boolean addChild(INodesInPath iip, int pos,
-      INode child, boolean checkQuota) throws QuotaExceededException {
+  boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
+      throws QuotaExceededException {
     final INode[] inodes = iip.getINodes();
     // Disallow creation of /.reserved. This may be created when loading
     // editlog/fsimage during upgrade since /.reserved was a valid name in older
@ -1626,6 +1577,7 @@
       inodeMap.clear();
       addToInodeMap(rootDir);
       nameCache.reset();
+      inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     } finally {
       writeUnlock();
     }
@ -2381,7 +2333,7 @@
    * @throws UnresolvedLinkException if symlink can't be resolved
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
-  private INode getINode4Write(String src, boolean resolveLink)
+  INode getINode4Write(String src, boolean resolveLink)
       throws UnresolvedLinkException, SnapshotAccessControlException {
     return getINodesInPath4Write(src, resolveLink).getLastINode();
   }
@ -2481,4 +2433,51 @@
       ? FSDirStatAndListingOp.getFileInfo(this, path, resolveSymlink, false,
       false) : null;
   }
/**
* Verify that parent directory of src exists.
*/
void verifyParentDir(String src)
throws FileNotFoundException, ParentNotDirectoryException,
UnresolvedLinkException {
Path parent = new Path(src).getParent();
if (parent != null) {
final INode parentNode = getINode(parent.toString());
if (parentNode == null) {
throw new FileNotFoundException("Parent directory doesn't exist: "
+ parent);
} else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
throw new ParentNotDirectoryException("Parent path is not a directory: "
+ parent);
}
}
}
/** Allocate a new inode ID. */
long allocateNewInodeId() {
return inodeId.nextValue();
}
/** @return the last inode ID. */
public long getLastInodeId() {
return inodeId.getCurrentValue();
}
/**
* Set the last allocated inode id when fsimage or editlog is loaded.
* @param newValue
*/
void resetLastInodeId(long newValue) throws IOException {
try {
inodeId.skipTo(newValue);
} catch(IllegalStateException ise) {
throw new IOException(ise);
}
}
/** Should only be used for tests to reset to any value
* @param newValue*/
void resetLastInodeIdWithoutChecking(long newValue) {
inodeId.setCurrentValue(newValue);
}
 }
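Alongside taking over verifyParentDir(), FSDirectory also inherits the inode ID counter that previously lived in FSNamesystem. INodeId is a sequential counter; the sketch below is an assumed simplification of the semantics the relocated helpers rely on (it is not the real INodeId/SequentialNumber implementation), shown only to make the skipTo/setCurrentValue distinction concrete.

import java.util.concurrent.atomic.AtomicLong;

// Assumed simplification of the counter behind allocateNewInodeId(),
// getLastInodeId(), resetLastInodeId() and resetLastInodeIdWithoutChecking().
class SequentialIdSketch {
  private final AtomicLong current;

  SequentialIdSketch(long initialValue) {
    this.current = new AtomicLong(initialValue);
  }

  long nextValue() {                 // allocateNewInodeId()
    return current.incrementAndGet();
  }

  long getCurrentValue() {           // getLastInodeId()
    return current.get();
  }

  void setCurrentValue(long value) { // resetLastInodeIdWithoutChecking(), tests only
    current.set(value);
  }

  void skipTo(long value) {          // resetLastInodeId(), used while loading fsimage/edits
    if (value < current.get()) {
      throw new IllegalStateException("Cannot skip to " + value
          + ": counter is already at " + current.get());
    }
    current.set(value);
  }
}

Keeping the counter next to the inode map means the fsimage and edit-log loaders in the hunks below now reach it through fsn.dir rather than through FSNamesystem.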

FSEditLogLoader.java

@ -177,7 +177,7 @@ public class FSEditLogLoader {
     prog.setTotal(Phase.LOADING_EDITS, step, numTxns);
     Counter counter = prog.getCounter(Phase.LOADING_EDITS, step);
     long lastLogTime = now();
-    long lastInodeId = fsNamesys.getLastInodeId();
+    long lastInodeId = fsNamesys.dir.getLastInodeId();
     try {
       while (true) {
@ -277,7 +277,7 @@
         }
       }
     } finally {
-      fsNamesys.resetLastInodeId(lastInodeId);
+      fsNamesys.dir.resetLastInodeId(lastInodeId);
       if(closeOnExit) {
         in.close();
       }
@ -306,12 +306,12 @@
         throw new IOException("The layout version " + logVersion
             + " supports inodeId but gave bogus inodeId");
       }
-      inodeId = fsNamesys.allocateNewInodeId();
+      inodeId = fsNamesys.dir.allocateNewInodeId();
     } else {
       // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
       // fsimage but editlog captures more recent inodeId allocations
       if (inodeId > lastInodeId) {
-        fsNamesys.resetLastInodeId(inodeId);
+        fsNamesys.dir.resetLastInodeId(inodeId);
       }
     }
     return inodeId;
@ -531,7 +531,7 @@
       MkdirOp mkdirOp = (MkdirOp)op;
       inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion,
           lastInodeId);
-      fsDir.unprotectedMkdir(inodeId,
+      FSDirMkdirOp.unprotectedMkdir(fsDir, inodeId,
           renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
           mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp);
       break;

FSImageFormat.java

@ -380,7 +380,7 @@ public class FSImageFormat {
     if (NameNodeLayoutVersion.supports(
         LayoutVersion.Feature.ADD_INODE_ID, imgVersion)) {
       long lastInodeId = in.readLong();
-      namesystem.resetLastInodeId(lastInodeId);
+      namesystem.dir.resetLastInodeId(lastInodeId);
       if (LOG.isDebugEnabled()) {
         LOG.debug("load last allocated InodeId from fsimage:" + lastInodeId);
       }
@ -734,7 +734,7 @@
     long inodeId = NameNodeLayoutVersion.supports(
         LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong()
-        : namesystem.allocateNewInodeId();
+        : namesystem.dir.allocateNewInodeId();
     final short replication = namesystem.getBlockManager().adjustReplication(
         in.readShort());
@ -1265,7 +1265,7 @@
     out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
     out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
     out.writeLong(context.getTxId());
-    out.writeLong(sourceNamesystem.getLastInodeId());
+    out.writeLong(sourceNamesystem.dir.getLastInodeId());
     sourceNamesystem.getSnapshotManager().write(out);

FSImageFormatPBINode.java

@ -210,7 +210,7 @@ public final class FSImageFormatPBINode {
     void loadINodeSection(InputStream in) throws IOException {
       INodeSection s = INodeSection.parseDelimitedFrom(in);
-      fsn.resetLastInodeId(s.getLastInodeId());
+      fsn.dir.resetLastInodeId(s.getLastInodeId());
       LOG.info("Loading " + s.getNumInodes() + " INodes.");
       for (int i = 0; i < s.getNumInodes(); ++i) {
         INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
@ -487,7 +487,7 @@
       INodeMap inodesMap = fsn.dir.getINodeMap();
       INodeSection.Builder b = INodeSection.newBuilder()
-          .setLastInodeId(fsn.getLastInodeId()).setNumInodes(inodesMap.size());
+          .setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
       INodeSection s = b.build();
       s.writeDelimitedTo(out);

FSImageSerialization.java

@ -120,7 +120,7 @@ public class FSImageSerialization {
     byte[] name = readBytes(in);
     long inodeId = NameNodeLayoutVersion.supports(
         LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong()
-        : fsNamesys.allocateNewInodeId();
+        : fsNamesys.dir.allocateNewInodeId();
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
     long preferredBlockSize = in.readLong();

FSNamesystem.java

@ -518,8 +518,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Whether the namenode is in the middle of starting the active service
    */
   private volatile boolean startingActiveService = false;
-  private INodeId inodeId;
   private final RetryCache retryCache;
@ -583,32 +581,6 @@
     }
   }
/**
* Set the last allocated inode id when fsimage or editlog is loaded.
*/
public void resetLastInodeId(long newValue) throws IOException {
try {
inodeId.skipTo(newValue);
} catch(IllegalStateException ise) {
throw new IOException(ise);
}
}
/** Should only be used for tests to reset to any value */
void resetLastInodeIdWithoutChecking(long newValue) {
inodeId.setCurrentValue(newValue);
}
/** @return the last inode ID. */
public long getLastInodeId() {
return inodeId.getCurrentValue();
}
/** Allocate a new inode ID. */
public long allocateNewInodeId() {
return inodeId.nextValue();
}
   /**
    * Clear all loaded data
    */
@ -617,7 +589,6 @@
     dtSecretManager.reset();
     blockIdManager.clear();
     leaseManager.removeAllLeases();
-    inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     snapshotManager.clearSnapshottableDirs();
     cacheManager.clear();
     setImageLoaded(false);
@ -843,8 +814,7 @@
     this.editLogRollerInterval = conf.getInt(
         DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS,
         DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT);
-    this.inodeId = new INodeId();
     this.lazyPersistFileScrubIntervalSec = conf.getInt(
         DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
         DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT);
@ -2076,7 +2046,7 @@
       checkNameNodeSafeMode("Cannot create symlink " + link);
       link = dir.resolvePath(pc, link, pathComponents);
       if (!createParent) {
-        verifyParentDir(link);
+        dir.verifyParentDir(link);
       }
       if (!dir.isValidToCreate(link)) {
         throw new IOException("failed to create link " + link
@ -2251,25 +2221,6 @@
     }
   }
/**
* Verify that parent directory of src exists.
*/
private void verifyParentDir(String src) throws FileNotFoundException,
ParentNotDirectoryException, UnresolvedLinkException {
assert hasReadLock();
Path parent = new Path(src).getParent();
if (parent != null) {
final INode parentNode = dir.getINode(parent.toString());
if (parentNode == null) {
throw new FileNotFoundException("Parent directory doesn't exist: "
+ parent);
} else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
throw new ParentNotDirectoryException("Parent path is not a directory: "
+ parent);
}
}
}
   /**
    * If the file is within an encryption zone, select the appropriate
    * CryptoProtocolVersion from the list provided by the client. Since the
@ -2548,7 +2499,7 @@
       }
       if (!createParent) {
-        verifyParentDir(src);
+        dir.verifyParentDir(src);
       }
       try {
@ -2580,8 +2531,8 @@
       // Always do an implicit mkdirs for parent directory tree.
       Path parent = new Path(src).getParent();
-      if (parent != null && mkdirsRecursively(parent.toString(),
-          permissions, true, now())) {
+      if (parent != null && FSDirMkdirOp.mkdirsRecursively(dir,
+          parent.toString(), permissions, true, now())) {
         newNode = dir.addFile(src, permissions, replication, blockSize,
             holder, clientMachine);
       }
@ -3875,186 +3826,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Create all the necessary directories
    */
   boolean mkdirs(String src, PermissionStatus permissions,
-      boolean createParent) throws IOException, UnresolvedLinkException {
-    boolean ret = false;
+      boolean createParent) throws IOException {
+    HdfsFileStatus auditStat = null;
+    checkOperation(OperationCategory.WRITE);
+    writeLock();
     try {
-      ret = mkdirsInt(src, permissions, createParent);
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot create directory " + src);
+      auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
     } catch (AccessControlException e) {
       logAuditEvent(false, "mkdirs", src);
       throw e;
-    }
-    return ret;
-  }
-
-  private boolean mkdirsInt(final String srcArg, PermissionStatus permissions,
-      boolean createParent) throws IOException, UnresolvedLinkException {
-    String src = srcArg;
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
-    }
-    if (!DFSUtil.isValidName(src)) {
-      throw new InvalidPathException(src);
-    }
-    FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    HdfsFileStatus resultingStat = null;
-    boolean status = false;
-    writeLock();
-    try {
-      checkOperation(OperationCategory.WRITE);
-      checkNameNodeSafeMode("Cannot create directory " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
-      status = mkdirsInternal(pc, src, permissions, createParent);
-      if (status) {
-        resultingStat = getAuditFileInfo(src, false);
-      }
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (status) {
-      logAuditEvent(true, "mkdirs", srcArg, null, resultingStat);
-    }
-    return status;
-  }
+    logAuditEvent(true, "mkdirs", src, null, auditStat);
/**
* Create all the necessary directories
*/
private boolean mkdirsInternal(FSPermissionChecker pc, String src,
PermissionStatus permissions, boolean createParent)
throws IOException, UnresolvedLinkException {
assert hasWriteLock();
if (isPermissionEnabled) {
checkTraverse(pc, src);
}
if (dir.isDirMutable(src)) {
// all the users of mkdirs() are used to expect 'true' even if
// a new directory is not created.
return true;
}
if (isPermissionEnabled) {
checkAncestorAccess(pc, src, FsAction.WRITE);
}
if (!createParent) {
verifyParentDir(src);
}
// validate that we have enough inodes. This is, at best, a
// heuristic because the mkdirs() operation might need to
// create multiple inodes.
checkFsObjectLimit();
if (!mkdirsRecursively(src, permissions, false, now())) {
throw new IOException("Failed to create directory: " + src);
}
return true;
}
/**
* Create a directory
* If ancestor directories do not exist, automatically create them.
* @param src string representation of the path to the directory
* @param permissions the permission of the directory
* @param inheritPermission if the permission of the directory should inherit
* from its parent or not. u+wx is implicitly added to
* the automatically created directories, and to the
* given directory if inheritPermission is true
* @param now creation time
* @return true if the operation succeeds false otherwise
* @throws QuotaExceededException if directory creation violates
* any quota limit
* @throws UnresolvedLinkException if a symlink is encountered in src.
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
private boolean mkdirsRecursively(String src, PermissionStatus permissions,
boolean inheritPermission, long now)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException,
AclException {
src = FSDirectory.normalizePath(src);
String[] names = INode.getPathNames(src);
byte[][] components = INode.getPathComponents(names);
final int lastInodeIndex = components.length - 1;
dir.writeLock();
try {
INodesInPath iip = dir.getExistingPathINodes(components);
if (iip.isSnapshot()) {
throw new SnapshotAccessControlException(
"Modification on RO snapshot is disallowed");
}
INode[] inodes = iip.getINodes();
// find the index of the first null in inodes[]
StringBuilder pathbuilder = new StringBuilder();
int i = 1;
for(; i < inodes.length && inodes[i] != null; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
if (!inodes[i].isDirectory()) {
throw new FileAlreadyExistsException(
"Parent path is not a directory: "
+ pathbuilder + " "+inodes[i].getLocalName());
}
}
// default to creating parent dirs with the given perms
PermissionStatus parentPermissions = permissions;
// if not inheriting and it's the last inode, there's no use in
// computing perms that won't be used
if (inheritPermission || (i < lastInodeIndex)) {
// if inheriting (ie. creating a file or symlink), use the parent dir,
// else the supplied permissions
// NOTE: the permissions of the auto-created directories violate posix
FsPermission parentFsPerm = inheritPermission
? inodes[i-1].getFsPermission() : permissions.getPermission();
// ensure that the permissions allow user write+execute
if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
parentFsPerm = new FsPermission(
parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
parentFsPerm.getGroupAction(),
parentFsPerm.getOtherAction()
);
}
if (!parentPermissions.getPermission().equals(parentFsPerm)) {
parentPermissions = new PermissionStatus(
parentPermissions.getUserName(),
parentPermissions.getGroupName(),
parentFsPerm
);
// when inheriting, use same perms for entire path
if (inheritPermission) permissions = parentPermissions;
}
}
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
dir.unprotectedMkdir(allocateNewInodeId(), iip, i, components[i],
(i < lastInodeIndex) ? parentPermissions : permissions, null,
now);
if (inodes[i] == null) {
return false;
}
// Directory creation also count towards FilesCreated
// to match count of FilesDeleted metric.
NameNode.getNameNodeMetrics().incrFilesCreated();
final String cur = pathbuilder.toString();
getEditLog().logMkDir(cur, inodes[i]);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"mkdirs: created directory " + cur);
}
}
} finally {
dir.writeUnlock();
}
     return true;
   }
@ -4763,12 +4550,13 @@
     final long modTime = now();
     if (createParent) {
       final String parent = new Path(path).getParent().toString();
-      if (!mkdirsRecursively(parent, dirPerms, true, modTime)) {
+      if (!FSDirMkdirOp.mkdirsRecursively(dir, parent, dirPerms, true,
+          modTime)) {
         return null;
       }
     }
     final String userName = dirPerms.getUserName();
-    long id = allocateNewInodeId();
+    long id = dir.allocateNewInodeId();
     INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
         new PermissionStatus(userName, null, FsPermission.getDefault()));
     if (newNode == null) {
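None of this is meant to change client-visible behaviour: mkdirs() still creates missing ancestors and still reports success when the directory already exists, which is what the removed "all the users of mkdirs() are used to expect 'true'" comment documents. A hedged usage sketch against the public FileSystem API; the path is an arbitrary example and the snippet assumes a reachable HDFS configuration on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirsUsageSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/a/b/c");   // arbitrary example path
    System.out.println(fs.mkdirs(p));  // true: creates a, a/b and a/b/c as needed
    System.out.println(fs.mkdirs(p));  // still true, even though nothing new is created
    fs.close();
  }
}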

DFSTestUtil.java

@ -201,13 +201,11 @@ public class DFSTestUtil {
logicalName, "nn2"), "127.0.0.1:12346"); logicalName, "nn2"), "127.0.0.1:12346");
} }
public static void setEditLogForTesting(NameNode nn, FSEditLog newLog) { public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
Whitebox.setInternalState(nn.getFSImage(), "editLog", newLog); Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
Whitebox.setInternalState(nn.getNamesystem().getFSDirectory(), "editLog", Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
newLog);
} }
/** class MyFile contains enough information to recreate the contents of /** class MyFile contains enough information to recreate the contents of
* a single file. * a single file.
*/ */

TestRenameWhileOpen.java

@ -77,7 +77,7 @@ public class TestRenameWhileOpen {
       FSEditLog spyLog =
           spy(cluster.getNameNode().getFSImage().getEditLog());
       doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
-      DFSTestUtil.setEditLogForTesting(cluster.getNameNode(), spyLog);
+      DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
       final int nnport = cluster.getNameNodePort();

NameNodeAdapter.java

@ -188,7 +188,7 @@ public class NameNodeAdapter {
   public static FSEditLog spyOnEditLog(NameNode nn) {
     FSEditLog spyEditLog = spy(nn.getNamesystem().getFSImage().getEditLog());
-    DFSTestUtil.setEditLogForTesting(nn, spyEditLog);
+    DFSTestUtil.setEditLogForTesting(nn.getNamesystem(), spyEditLog);
     EditLogTailer tailer = nn.getNamesystem().getEditLogTailer();
     if (tailer != null) {
       tailer.setEditLog(spyEditLog);

TestEditLog.java

@ -204,7 +204,7 @@ public class TestEditLog {
     FSEditLog editLog = namesystem.getEditLog();
     for (int i = 0; i < numTransactions; i++) {
-      INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
+      INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
       inode.toUnderConstruction("", "");
@ -375,7 +375,7 @@
     // Remember the current lastInodeId and will reset it back to test
     // loading editlog segments.The transactions in the following allocate new
     // inode id to write to editlogs but doesn't create ionde in namespace
-    long originalLastInodeId = namesystem.getLastInodeId();
+    long originalLastInodeId = namesystem.dir.getLastInodeId();
     // Create threads and make them run transactions concurrently.
     Thread threadId[] = new Thread[NUM_THREADS];
@ -409,7 +409,7 @@
     // If there were any corruptions, it is likely that the reading in
     // of these transactions will throw an exception.
     //
-    namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
+    namesystem.dir.resetLastInodeIdWithoutChecking(originalLastInodeId);
     for (Iterator<StorageDirectory> it =
         fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);

TestEditLogRace.java

@ -458,8 +458,8 @@ public class TestEditLogRace {
     try {
       FSImage fsimage = namesystem.getFSImage();
       FSEditLog editLog = spy(fsimage.getEditLog());
-      fsimage.editLog = editLog;
+      DFSTestUtil.setEditLogForTesting(namesystem, editLog);
       final AtomicReference<Throwable> deferredException =
           new AtomicReference<Throwable>();
       final CountDownLatch waitToEnterSync = new CountDownLatch(1);

TestINodeFile.java

@ -392,7 +392,7 @@ public class TestINodeFile {
       cluster.waitActive();
       FSNamesystem fsn = cluster.getNamesystem();
-      long lastId = fsn.getLastInodeId();
+      long lastId = fsn.dir.getLastInodeId();
       // Ensure root has the correct inode ID
       // Last inode ID should be root inode ID and inode map size should be 1
@ -407,14 +407,14 @@
       FileSystem fs = cluster.getFileSystem();
       Path path = new Path("/test1");
       assertTrue(fs.mkdirs(path));
-      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
       // Create a file
       // Last inode ID and inode map size should increase by 1
       NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
       DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
-      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
       // Ensure right inode ID is returned in file status
@ -425,7 +425,7 @@
       // Last inode ID and inode map size should not change
       Path renamedPath = new Path("/test2");
       assertTrue(fs.rename(path, renamedPath));
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
       // Delete test2/file and test2 and ensure inode map size decreases
@ -442,12 +442,12 @@
       inodeCount += 3; // test1, file1 and file2 are created
       expectedLastInodeId += 3;
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       // Concat the /test1/file1 /test1/file2 into /test1/file2
       nnrpc.concat(file2, new String[] {file1});
       inodeCount--; // file1 and file2 are concatenated to file2
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertTrue(fs.delete(new Path("/test1"), true));
       inodeCount -= 2; // test1 and file2 is deleted
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
@ -456,14 +456,14 @@
       cluster.restartNameNode();
       cluster.waitActive();
       fsn = cluster.getNamesystem();
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
       // Create two inodes test2 and test2/file2
       DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
       expectedLastInodeId += 2;
       inodeCount += 2;
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
       // create /test3, and /test3/file.
@ -472,7 +472,7 @@
       assertTrue(outStream != null);
       expectedLastInodeId += 2;
       inodeCount += 2;
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
       // Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
@ -486,7 +486,7 @@
       cluster.restartNameNode();
       cluster.waitActive();
       fsn = cluster.getNamesystem();
-      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+      assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
       assertEquals(inodeCount, fsn.dir.getInodeMapSize());
     } finally {
       if (cluster != null) {

TestNameNodeRecovery.java

@ -535,7 +535,7 @@ public class TestNameNodeRecovery {
       FSEditLog spyLog =
           spy(cluster.getNameNode().getFSImage().getEditLog());
       doNothing().when(spyLog).endCurrentLogSegment(true);
-      DFSTestUtil.setEditLogForTesting(cluster.getNameNode(), spyLog);
+      DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
     }
     fileSys = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();

TestEditLogsDuringFailover.java

@ -142,7 +142,7 @@ public class TestEditLogsDuringFailover {
     File sharedDir = new File(sharedUri.getPath(), "current");
     FSNamesystem fsn = cluster.getNamesystem(0);
     FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1,
-        fsn.getLastInodeId() + 1);
+        fsn.getFSDirectory().getLastInodeId() + 1);
     assertEditFiles(Collections.singletonList(sharedUri),
         NNStorage.getInProgressEditsFileName(1));

TestStandbyCheckpoints.java

@ -251,7 +251,7 @@ public class TestStandbyCheckpoints {
"testCheckpointCancellation-tmp"); "testCheckpointCancellation-tmp");
FSNamesystem fsn = cluster.getNamesystem(0); FSNamesystem fsn = cluster.getNamesystem(0);
FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3, FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
fsn.getLastInodeId() + 1); fsn.getFSDirectory().getLastInodeId() + 1);
String fname = NNStorage.getInProgressEditsFileName(3); String fname = NNStorage.getInProgressEditsFileName(3);
new File(tmpDir, fname).renameTo(new File(sharedDir, fname)); new File(tmpDir, fname).renameTo(new File(sharedDir, fname));