HDFS-4334. Add a unique id to INode. Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1426429 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-12-28 08:26:33 +00:00
parent a7d444d002
commit 0fa9c7a825
20 changed files with 369 additions and 131 deletions

View File

@ -172,6 +172,8 @@ Trunk (Unreleased)
HDFS-4234. Use generic code for choosing datanode in Balancer. (szetszwo)
HDFS-4334. Add a unique id to INode. (Brandon Li via szetszwo)
OPTIMIZATIONS
BUG FIXES

View File

@ -76,8 +76,9 @@
*************************************************/
public class FSDirectory implements Closeable {
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
return new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
namesystem.createFsOwnerPermissions(new FsPermission((short)0755)));
return new INodeDirectoryWithQuota(namesystem.allocateNewInodeId(),
INodeDirectory.ROOT_NAME,
namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
}
INodeDirectoryWithQuota rootDir;
@ -253,7 +254,9 @@ INodeFileUnderConstruction addFile(String path,
if (!mkdirs(parent.toString(), permissions, true, modTime)) {
return null;
}
long id = namesystem.allocateNewInodeId();
INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
id,
permissions,replication,
preferredBlockSize, modTime, clientName,
clientMachine, clientNode);
@ -275,7 +278,8 @@ INodeFileUnderConstruction addFile(String path,
return newNode;
}
INode unprotectedAddFile( String path,
INode unprotectedAddFile( long id,
String path,
PermissionStatus permissions,
short replication,
long modificationTime,
@ -287,13 +291,11 @@ INode unprotectedAddFile( String path,
final INode newNode;
assert hasWriteLock();
if (underConstruction) {
newNode = new INodeFileUnderConstruction(
permissions, replication,
preferredBlockSize, modificationTime, clientName,
clientMachine, null);
newNode = new INodeFileUnderConstruction(id, permissions, replication,
preferredBlockSize, modificationTime, clientName, clientMachine, null);
} else {
newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
modificationTime, atime, preferredBlockSize);
newNode = new INodeFile(id, permissions, BlockInfo.EMPTY_ARRAY,
replication, modificationTime, atime, preferredBlockSize);
}
try {
@ -1428,8 +1430,9 @@ boolean mkdirs(String src, PermissionStatus permissions,
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
pathbuilder.append(Path.SEPARATOR + names[i]);
unprotectedMkdir(inodesInPath, i, components[i],
(i < lastInodeIndex) ? parentPermissions : permissions, now);
unprotectedMkdir(namesystem.allocateNewInodeId(), inodesInPath, i,
components[i], (i < lastInodeIndex) ? parentPermissions
: permissions, now);
if (inodes[i] == null) {
return false;
}
@ -1451,7 +1454,7 @@ boolean mkdirs(String src, PermissionStatus permissions,
return true;
}
INode unprotectedMkdir(String src, PermissionStatus permissions,
INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
long timestamp) throws QuotaExceededException,
UnresolvedLinkException {
assert hasWriteLock();
@ -1460,7 +1463,8 @@ INode unprotectedMkdir(String src, PermissionStatus permissions,
components.length, false);
INode[] inodes = inodesInPath.getINodes();
final int pos = inodes.length - 1;
unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
unprotectedMkdir(inodeId, inodesInPath, pos, components[pos], permissions,
timestamp);
return inodes[pos];
}
@ -1468,11 +1472,12 @@ INode unprotectedMkdir(String src, PermissionStatus permissions,
* The parent path to the directory is at [0, pos-1].
* All ancestors exist. Newly created one stored at index pos.
*/
private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
byte[] name, PermissionStatus permission,
long timestamp) throws QuotaExceededException {
private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
int pos, byte[] name, PermissionStatus permission, long timestamp)
throws QuotaExceededException {
assert hasWriteLock();
final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp);
if (addChild(inodesInPath, pos, dir, true)) {
inodesInPath.setINode(pos, dir);
}
@ -2042,9 +2047,10 @@ INodeSymlink addSymlink(String path, String target,
}
final String userName = dirPerms.getUserName();
INodeSymlink newNode = null;
long id = namesystem.allocateNewInodeId();
writeLock();
try {
newNode = unprotectedAddSymlink(path, target, modTime, modTime,
newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
new PermissionStatus(userName, null, FsPermission.getDefault()));
} finally {
writeUnlock();
@ -2064,12 +2070,13 @@ INodeSymlink addSymlink(String path, String target,
/**
* Add the specified path into the namespace. Invoked from edit log processing.
*/
INodeSymlink unprotectedAddSymlink(String path, String target, long mtime,
long atime, PermissionStatus perm)
INodeSymlink unprotectedAddSymlink(long id, String path, String target,
long mtime, long atime, PermissionStatus perm)
throws UnresolvedLinkException, QuotaExceededException {
assert hasWriteLock();
final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
return addINode(path, symlink)? symlink: null;
final INodeSymlink symlink = new INodeSymlink(id, target, mtime, atime,
perm);
return addINode(path, symlink) ? symlink : null;
}
/**

View File

@ -120,6 +120,7 @@ long loadEditRecords(EditLogInputStream in, boolean closeOnExit,
long lastTxId = in.getLastTxId();
long numTxns = (lastTxId - expectedStartingTxId) + 1;
long lastLogTime = now();
long lastInodeId = fsNamesys.getLastInodeId();
if (LOG.isDebugEnabled()) {
LOG.debug("edit log length: " + in.length() + ", start txid: "
@ -170,7 +171,10 @@ long loadEditRecords(EditLogInputStream in, boolean closeOnExit,
}
}
try {
applyEditLogOp(op, fsDir, in.getVersion());
long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
if (lastInodeId < inodeId) {
lastInodeId = inodeId;
}
} catch (Throwable e) {
LOG.error("Encountered exception on operation " + op, e);
MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
@ -205,6 +209,7 @@ long loadEditRecords(EditLogInputStream in, boolean closeOnExit,
}
}
} finally {
fsNamesys.resetLastInodeId(lastInodeId);
if(closeOnExit) {
in.close();
}
@ -223,9 +228,9 @@ long loadEditRecords(EditLogInputStream in, boolean closeOnExit,
}
@SuppressWarnings("deprecation")
private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
int logVersion) throws IOException {
long inodeId = INodeId.GRANDFATHER_INODE_ID;
if (LOG.isTraceEnabled()) {
LOG.trace("replaying edit log: " + op);
}
@ -255,11 +260,11 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
assert addCloseOp.blocks.length == 0;
// add to the file tree
newFile = (INodeFile)fsDir.unprotectedAddFile(
addCloseOp.path, addCloseOp.permissions,
replication, addCloseOp.mtime,
addCloseOp.atime, addCloseOp.blockSize,
true, addCloseOp.clientName, addCloseOp.clientMachine);
inodeId = fsNamesys.allocateNewInodeId();
newFile = (INodeFile) fsDir.unprotectedAddFile(inodeId,
addCloseOp.path, addCloseOp.permissions, replication,
addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
addCloseOp.clientName, addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
} else { // This is OP_ADD on an existing file
@ -370,7 +375,8 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
}
case OP_MKDIR: {
MkdirOp mkdirOp = (MkdirOp)op;
fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
inodeId = fsNamesys.allocateNewInodeId();
fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
mkdirOp.timestamp);
break;
}
@ -423,9 +429,10 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
}
case OP_SYMLINK: {
SymlinkOp symlinkOp = (SymlinkOp)op;
fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
symlinkOp.mtime, symlinkOp.atime,
symlinkOp.permissionStatus);
inodeId = fsNamesys.allocateNewInodeId();
fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
symlinkOp.value, symlinkOp.mtime,
symlinkOp.atime, symlinkOp.permissionStatus);
break;
}
case OP_RENAME: {
@ -485,6 +492,7 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
default:
throw new IOException("Invalid operation read " + op.opCode);
}
return inodeId;
}
private static String formatEditLogReplayError(EditLogInputStream in,

View File

@ -166,7 +166,8 @@ void load(File curFile)
in = compression.unwrapInputStream(fin);
LOG.info("Loading image file " + curFile + " using " + compression);
// reset INodeId. TODO: remove this after inodeId is persisted in fsimage
namesystem.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
// load all inodes
LOG.info("Number of files = " + numFiles);
if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
@ -334,6 +335,8 @@ private INode loadINode(DataInputStream in)
long blockSize = 0;
int imgVersion = getLayoutVersion();
long inodeId = namesystem.allocateNewInodeId();
short replication = in.readShort();
replication = namesystem.getBlockManager().adjustReplication(replication);
modificationTime = in.readLong();
@ -371,7 +374,7 @@ private INode loadINode(DataInputStream in)
PermissionStatus permissions = PermissionStatus.read(in);
return INode.newINode(permissions, blocks, symlink, replication,
return INode.newINode(inodeId, permissions, blocks, symlink, replication,
modificationTime, atime, nsQuota, dsQuota, blockSize);
}

View File

@ -107,7 +107,9 @@ static INodeFileUnderConstruction readINodeUnderConstruction(
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
return new INodeFileUnderConstruction(name,
//TODO: get inodeId from fsimage after inodeId is persisted
return new INodeFileUnderConstruction(INodeId.GRANDFATHER_INODE_ID,
name,
blockReplication,
modificationTime,
preferredBlockSize,

View File

@ -375,6 +375,30 @@ private void logAuditEvent(boolean succeeded,
private final boolean haEnabled;
private INodeId inodeId;
/**
* Set the last allocated inode id when fsimage is loaded or editlog is
* applied.
* @throws IOException
*/
public void resetLastInodeId(long newValue) throws IOException {
inodeId.resetLastInodeId(newValue);
}
/** Should only be used for tests to reset to any value */
void resetLastInodeIdWithoutChecking(long newValue) {
inodeId.resetLastInodeIdWithoutChecking(newValue);
}
public long getLastInodeId() {
return inodeId.getLastInodeId();
}
public long allocateNewInodeId() {
return inodeId.allocateNewInodeId();
}
/**
* Clear all loaded data
*/
@ -383,6 +407,7 @@ void clear() {
dtSecretManager.reset();
generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
leaseManager.removeAllLeases();
inodeId.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
}
@VisibleForTesting
@ -534,6 +559,8 @@ public static FSNamesystem loadFromDisk(Configuration conf,
this.standbyShouldCheckpoint = conf.getBoolean(
DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
this.inodeId = new INodeId();
// For testing purposes, allow the DT secret manager to be started regardless
// of whether security is enabled.
alwaysUseDelegationTokensForTests = conf.getBoolean(
@ -1931,6 +1958,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
boolean writeToEditLog) throws IOException {
INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
file.getId(),
file.getLocalNameBytes(),
file.getBlockReplication(),
file.getModificationTime(),

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
@ -101,6 +102,11 @@ static long toLong(PermissionStatus ps) {
}
}
/**
* The inode id
*/
final private long id;
/**
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
@ -120,8 +126,9 @@ static long toLong(PermissionStatus ps) {
protected long modificationTime = 0L;
protected long accessTime = 0L;
private INode(byte[] name, long permission, INodeDirectory parent,
private INode(long id, byte[] name, long permission, INodeDirectory parent,
long modificationTime, long accessTime) {
this.id = id;
this.name = name;
this.permission = permission;
this.parent = parent;
@ -129,24 +136,29 @@ private INode(byte[] name, long permission, INodeDirectory parent,
this.accessTime = accessTime;
}
INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
long modificationTime, long accessTime) {
this(name, PermissionStatusFormat.toLong(permissions), parent,
INode(long id, byte[] name, PermissionStatus permissions,
INodeDirectory parent, long modificationTime, long accessTime) {
this(id, name, PermissionStatusFormat.toLong(permissions), parent,
modificationTime, accessTime);
}
INode(PermissionStatus permissions, long mtime, long atime) {
this(null, permissions, null, mtime, atime);
INode(long id, PermissionStatus permissions, long mtime, long atime) {
this(id, null, PermissionStatusFormat.toLong(permissions), null, mtime, atime);
}
protected INode(String name, PermissionStatus permissions) {
this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
protected INode(long id, String name, PermissionStatus permissions) {
this(id, DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
}
/** @param other Other node to be copied */
INode(INode other) {
this(other.getLocalNameBytes(), other.permission, other.getParent(),
other.getModificationTime(), other.getAccessTime());
this(other.getId(), other.getLocalNameBytes(), other.permission, other
.getParent(), other.getModificationTime(), other.getAccessTime());
}
/** Get inode id */
public long getId() {
return this.id;
}
/**
@ -459,6 +471,7 @@ public final int hashCode() {
/**
* Create an INode; the inode's name is not set yet
*
* @param id preassigned inode id
* @param permissions permissions
* @param blocks blocks if a file
* @param symlink symblic link if a symbolic link
@ -470,7 +483,8 @@ public final int hashCode() {
* @param preferredBlockSize block size
* @return an inode
*/
static INode newINode(PermissionStatus permissions,
static INode newINode(long id,
PermissionStatus permissions,
BlockInfo[] blocks,
String symlink,
short replication,
@ -480,17 +494,17 @@ static INode newINode(PermissionStatus permissions,
long dsQuota,
long preferredBlockSize) {
if (symlink.length() != 0) { // check if symbolic link
return new INodeSymlink(symlink, modificationTime, atime, permissions);
return new INodeSymlink(id, symlink, modificationTime, atime, permissions);
} else if (blocks == null) { //not sym link and blocks null? directory!
if (nsQuota >= 0 || dsQuota >= 0) {
return new INodeDirectoryWithQuota(
permissions, modificationTime, nsQuota, dsQuota);
id, permissions, modificationTime, nsQuota, dsQuota);
}
// regular directory
return new INodeDirectory(permissions, modificationTime);
return new INodeDirectory(id, permissions, modificationTime);
}
// file
return new INodeFile(permissions, blocks, replication,
return new INodeFile(id, permissions, blocks, replication,
modificationTime, atime, preferredBlockSize);
}

View File

@ -53,17 +53,17 @@ public static INodeDirectory valueOf(INode inode, Object path
private List<INode> children = null;
INodeDirectory(String name, PermissionStatus permissions) {
super(name, permissions);
INodeDirectory(long id, String name, PermissionStatus permissions) {
super(id, name, permissions);
}
public INodeDirectory(PermissionStatus permissions, long mTime) {
super(permissions, mTime, 0);
public INodeDirectory(long id, PermissionStatus permissions, long mTime) {
super(id, permissions, mTime, 0);
}
/** constructor */
INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
super(name, permissions, null, mtime, 0L);
INodeDirectory(long id, byte[] name, PermissionStatus permissions, long mtime) {
super(id, name, permissions, null, mtime, 0L);
}
/** copy constructor

View File

@ -54,16 +54,16 @@ class INodeDirectoryWithQuota extends INodeDirectory {
}
/** constructor with no quota verification */
INodeDirectoryWithQuota(PermissionStatus permissions, long modificationTime,
long nsQuota, long dsQuota) {
super(permissions, modificationTime);
INodeDirectoryWithQuota(long id, PermissionStatus permissions,
long modificationTime, long nsQuota, long dsQuota) {
super(id, permissions, modificationTime);
this.nsQuota = nsQuota;
this.dsQuota = dsQuota;
}
/** constructor with no quota verification */
INodeDirectoryWithQuota(String name, PermissionStatus permissions) {
super(name, permissions);
INodeDirectoryWithQuota(long id, String name, PermissionStatus permissions) {
super(id, name, permissions);
}
/** Get this directory's namespace quota

View File

@ -86,10 +86,10 @@ static long combinePreferredBlockSize(long header, long blockSize) {
private BlockInfo[] blocks;
INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
short replication, long modificationTime,
long atime, long preferredBlockSize) {
super(permissions, modificationTime, atime);
INodeFile(long id, PermissionStatus permissions, BlockInfo[] blklist,
short replication, long modificationTime, long atime,
long preferredBlockSize) {
super(id, permissions, modificationTime, atime);
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
this.blocks = blklist;

View File

@ -49,21 +49,23 @@ public static INodeFileUnderConstruction valueOf(INode inode, String path
private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
INodeFileUnderConstruction(PermissionStatus permissions,
INodeFileUnderConstruction(long id,
PermissionStatus permissions,
short replication,
long preferredBlockSize,
long modTime,
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
modTime, modTime, preferredBlockSize);
super(id, permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY,
replication, modTime, modTime, preferredBlockSize);
this.clientName = clientName;
this.clientMachine = clientMachine;
this.clientNode = clientNode;
}
INodeFileUnderConstruction(byte[] name,
INodeFileUnderConstruction(long id,
byte[] name,
short blockReplication,
long modificationTime,
long preferredBlockSize,
@ -72,8 +74,8 @@ public static INodeFileUnderConstruction valueOf(INode inode, String path
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
super(perm, blocks, blockReplication, modificationTime, modificationTime,
preferredBlockSize);
super(id, perm, blocks, blockReplication, modificationTime,
modificationTime, preferredBlockSize);
setLocalName(name);
this.clientName = clientName;
this.clientMachine = clientMachine;
@ -112,7 +114,8 @@ INodeFile convertToInodeFile() {
assert allBlocksComplete() : "Can't finalize inode " + this
+ " since it contains non-complete blocks! Blocks are "
+ Arrays.asList(getBlocks());
INodeFile obj = new INodeFile(getPermissionStatus(),
INodeFile obj = new INodeFile(getId(),
getPermissionStatus(),
getBlocks(),
getBlockReplication(),
getModificationTime(),

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * An id which uniquely identifies an inode.
 */
@InterfaceAudience.Private
class INodeId implements Comparable<INodeId> {
  /**
   * The last reserved inode id. Ids 1 to 1000 are reserved for potential
   * future usage. Allocated ids are never recycled and are not expected to
   * wrap around for a very long time. The root inode id is therefore 1001.
   */
  public static final long LAST_RESERVED_ID = 1000L;

  /**
   * The inode id validation of a lease check is skipped when the request
   * uses GRANDFATHER_INODE_ID, for backward compatibility.
   */
  public static final long GRANDFATHER_INODE_ID = 0;

  /**
   * The highest inode id allocated so far, starting at LAST_RESERVED_ID.
   * The reference itself never changes, so the field is final.
   */
  private final AtomicLong lastInodeId = new AtomicLong(LAST_RESERVED_ID);

  /**
   * Create a new instance, initialized to LAST_RESERVED_ID.
   */
  INodeId() {
  }

  /**
   * Set the last allocated inode id when the fsimage is loaded or an edit
   * log is applied.
   *
   * NOTE(review): the range check and the subsequent set are not one atomic
   * step; presumably callers hold the namesystem write lock so no allocation
   * can interleave -- confirm.
   *
   * @param newValue the new last-allocated id; must not be smaller than the
   *        current value
   * @throws IOException if newValue would move the counter backwards
   */
  void resetLastInodeId(long newValue) throws IOException {
    final long current = getLastInodeId();
    if (newValue < current) {
      throw new IOException(
          "Can't reset lastInodeId to be less than its current value "
              + current + ", newValue=" + newValue);
    }
    lastInodeId.set(newValue);
  }

  /** Should only be used for tests to reset to any value. */
  void resetLastInodeIdWithoutChecking(long newValue) {
    lastInodeId.set(newValue);
  }

  /** @return the last allocated inode id. */
  long getLastInodeId() {
    return lastInodeId.get();
  }

  /**
   * Allocate a new inode id: first increment the counter and then get the id.
   */
  long allocateNewInodeId() {
    return lastInodeId.incrementAndGet();
  }

  @Override
  // Comparable
  public int compareTo(INodeId that) {
    // Long.compare avoids the overflow pitfall of subtraction-based compares.
    return Long.compare(this.getLastInodeId(), that.getLastInodeId());
  }

  @Override
  // Object
  public boolean equals(Object o) {
    if (!(o instanceof INodeId)) {
      return false;
    }
    // NOTE(review): equality is based on the mutable counter value, so two
    // instances can drift in and out of equality over time -- kept as-is to
    // preserve behavior, but callers should not use INodeId as a hash key.
    return compareTo((INodeId) o) == 0;
  }

  @Override
  // Object
  public int hashCode() {
    final long id = getLastInodeId();
    // Standard long-to-int hash fold (same recipe as Long.hashCode).
    return (int) (id ^ (id >>> 32));
  }
}

View File

@ -28,9 +28,9 @@
public class INodeSymlink extends INode {
private final byte[] symlink; // The target URI
INodeSymlink(String value, long mtime, long atime,
PermissionStatus permissions) {
super(permissions, mtime, atime);
INodeSymlink(long id, String value, long mtime, long atime,
PermissionStatus permissions) {
super(id, permissions, mtime, atime);
this.symlink = DFSUtil.string2Bytes(value);
}

View File

@ -62,7 +62,8 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
PermissionStatus p = new PermissionStatus("joeDoe", "people",
new FsPermission((short)0777));
INodeDirectory dirInode = new INodeDirectory(p, 0L);
INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
p, 0L);
editLog.logMkDir(BASE_PATH, dirInode);
long blockSize = 10;
BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@ -81,8 +82,9 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
}
INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
null, replication, 0, blockSize, blocks, p, "", "", null);
// Append path to filename with information about blockIDs
INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
blocks, p, "", "", null);
// Append path to filename with information about blockIDs
String path = "_" + iF + "_B" + blocks[0].getBlockId() +
"_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
String filePath = nameGenerator.getNextFileName("");
@ -90,12 +92,12 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
// Log the new sub directory in edits
if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
String currentDir = nameGenerator.getCurrentDir();
dirInode = new INodeDirectory(p, 0L);
dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
editLog.logMkDir(currentDir, dirInode);
}
editLog.logOpenFile(filePath,
new INodeFileUnderConstruction(
p, replication, 0, blockSize, "", "", null));
editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
null));
editLog.logCloseFile(filePath, inode);
if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks

View File

@ -208,7 +208,7 @@ public static FSEditLog createStandaloneEditLog(File logDir)
* only a specified number of "mkdirs" operations.
*/
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
long firstTxId) throws IOException {
long firstTxId, long newInodeId) throws IOException {
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
editLog.setNextTxId(firstTxId);
editLog.openForWrite();
@ -217,7 +217,7 @@ public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
FsPermission.createImmutable((short)0755));
for (int i = 1; i <= numDirs; i++) {
String dirName = "dir" + i;
INodeDirectory dir = new INodeDirectory(dirName, perms);
INodeDirectory dir = new INodeDirectory(newInodeId + i -1, dirName, perms);
editLog.logMkDir("/" + dirName, dir);
}
editLog.logSync();

View File

@ -152,7 +152,8 @@ public void run() {
for (int i = 0; i < numTransactions; i++) {
INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
p, replication, blockSize, 0, "", "", null);
namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
"", null);
editLog.logOpenFile("/filename" + (startIndex + i), inode);
editLog.logCloseFile("/filename" + (startIndex + i), inode);
editLog.logSync();
@ -317,6 +318,11 @@ private void testEditLog(int initialSize) throws IOException {
// we should now be writing to edits_inprogress_3
fsimage.rollEditLog();
// Remember the current lastInodeId and reset it back later to test
// loading of edit log segments. The transactions below allocate new
// inode ids to write to the edit logs but do not create inodes in the namespace
long originalLastInodeId = namesystem.getLastInodeId();
// Create threads and make them run transactions concurrently.
Thread threadId[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
@ -349,6 +355,7 @@ private void testEditLog(int initialSize) throws IOException {
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
//
namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
for (Iterator<StorageDirectory> it =
fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);

View File

@ -73,7 +73,8 @@ public void setUp() throws IOException {
fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
"namenode")).toString());
rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms);
rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
.allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
inodes = new INode[]{ rootInode, null };
fs = null;
fsIsReady = true;
@ -152,7 +153,8 @@ private void addChildWithName(String name, Class<?> expected)
// have to create after the caller has had a chance to set conf values
if (fs == null) fs = new MockFSDirectory();
INode child = new INodeDirectory(name, perms);
INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
name, perms);
child.setLocalName(name);
Class<?> generated = null;

View File

@ -26,14 +26,18 @@
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Test;
@ -54,9 +58,9 @@ public class TestINodeFile {
public void testReplication () {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
new PermissionStatus(userName, null, FsPermission.getDefault()), null,
replication, 0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", replication,
inf.getBlockReplication());
}
@ -71,9 +75,9 @@ public void testReplicationBelowLowerBound ()
throws IllegalArgumentException {
replication = -1;
preferredBlockSize = 128*1024*1024;
new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
null, FsPermission.getDefault()), null, replication, 0L, 0L,
preferredBlockSize);
}
/**
@ -84,20 +88,20 @@ public void testReplicationBelowLowerBound ()
public void testPreferredBlockSize () {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", preferredBlockSize,
inf.getPreferredBlockSize());
}
INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
new PermissionStatus(userName, null, FsPermission.getDefault()), null,
replication, 0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", preferredBlockSize,
inf.getPreferredBlockSize());
}
@Test
public void testPreferredBlockSizeUpperBound () {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE;
INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
new PermissionStatus(userName, null, FsPermission.getDefault()), null,
replication, 0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
inf.getPreferredBlockSize());
}
@ -112,9 +116,9 @@ public void testPreferredBlockSizeBelowLowerBound ()
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = -1;
new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
null, FsPermission.getDefault()), null, replication, 0L, 0L,
preferredBlockSize);
}
/**
@ -127,10 +131,10 @@ public void testPreferredBlockSizeAboveUpperBound ()
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE+1;
new INodeFile(new PermissionStatus(userName, null,
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
}
new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
null, FsPermission.getDefault()), null, replication, 0L, 0L,
preferredBlockSize);
}
@Test
public void testGetFullPathName() {
@ -139,12 +143,14 @@ public void testGetFullPathName() {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = new INodeFile(perms, null, replication,
0L, 0L, preferredBlockSize);
INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
replication, 0L, 0L, preferredBlockSize);
inf.setLocalName("f");
INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
INodeDirectory dir = new INodeDirectory("d", perms);
INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
INodeDirectory.ROOT_NAME, perms);
INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
perms);
assertEquals("f", inf.getFullPathName());
assertEquals("", inf.getLocalParentDir());
@ -242,7 +248,7 @@ private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
for (int i = 0; i < nCount; i++) {
PermissionStatus perms = new PermissionStatus(userName, null,
FsPermission.getDefault());
iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
preferredBlockSize);
iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
BlockInfo newblock = new BlockInfo(replication);
@ -293,10 +299,10 @@ public void testValueOf () throws IOException {
}
{//cast from INodeFile
final INode from = new INodeFile(
perm, null, replication, 0L, 0L, preferredBlockSize);
final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
null, replication, 0L, 0L, preferredBlockSize);
//cast to INodeFile, should success
//cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
assertTrue(f == from);
@ -318,7 +324,8 @@ public void testValueOf () throws IOException {
{//cast from INodeFileUnderConstruction
final INode from = new INodeFileUnderConstruction(
perm, replication, 0L, 0L, "client", "machine", null);
INodeId.GRANDFATHER_INODE_ID, perm, replication, 0L, 0L, "client",
"machine", null);
//cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
@ -338,7 +345,8 @@ public void testValueOf () throws IOException {
}
{//cast from INodeDirectory
final INode from = new INodeDirectory(perm, 0L);
final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
0L);
//cast to INodeFile, should fail
try {
@ -361,4 +369,47 @@ public void testValueOf () throws IOException {
assertTrue(d == from);
}
}
/**
 * Verify that the root inode always gets id 1001 and that a freshly
 * formatted fsimage records 1000 as the last allocated inode id, then
 * validate that the correct lastInodeId is persisted across namenode
 * restarts (including replay of an empty edit log).
 * @throws IOException if the mini cluster fails to start or restart
 */
@Test
public void TestInodeId() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNamesystem();
    // A newly formatted image starts the counter at 1000; the root
    // directory consumes the first allocated id, 1001.
    long lastId = fsn.getLastInodeId();
    assertEquals(1001, lastId);

    // Creating one directory allocates exactly one more id.
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    assertEquals(1002, fsn.getLastInodeId());

    // Creating a file allocates another id; close the returned stream
    // so the file is not left open/under construction.
    Path filePath = new Path("/test1/file");
    fs.create(filePath).close();
    assertEquals(1003, fsn.getLastInodeId());

    // Rename reuses the existing inode and must not allocate an id.
    Path renamedPath = new Path("/test2");
    assertTrue(fs.rename(path, renamedPath));
    assertEquals(1003, fsn.getLastInodeId());

    cluster.restartNameNode();
    cluster.waitActive();
    // Make sure an empty editlog can be handled.
    cluster.restartNameNode();
    cluster.waitActive();
    // Re-fetch the namesystem: the previous reference belongs to the
    // stopped namenode, so reading it would not prove persistence.
    fsn = cluster.getNamesystem();
    assertEquals(1003, fsn.getLastInodeId());
  } finally {
    // Always tear down the mini cluster to release ports and disk space.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}

View File

@ -36,6 +36,7 @@
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
@ -139,7 +140,9 @@ private void testFailoverFinalizesAndReadsInProgress(
// Create a fake in-progress edit-log in the shared directory
URI sharedUri = cluster.getSharedEditsDir(0, 1);
File sharedDir = new File(sharedUri.getPath(), "current");
FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1);
FSNamesystem fsn = cluster.getNamesystem(0);
FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1,
fsn.getLastInodeId() + 1);
assertEditFiles(Collections.singletonList(sharedUri),
NNStorage.getInProgressEditsFileName(1));

View File

@ -204,8 +204,9 @@ public void testCheckpointCancellation() throws Exception {
File sharedDir = new File(sharedUri.getPath(), "current");
File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
"testCheckpointCancellation-tmp");
FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG,
3);
FSNamesystem fsn = cluster.getNamesystem(0);
FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
fsn.getLastInodeId() + 1);
String fname = NNStorage.getInProgressEditsFileName(3);
new File(tmpDir, fname).renameTo(new File(sharedDir, fname));