HDFS-4334. Merge r1426429 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1471582 13f79535-47bb-0310-9956-ffa450edef68
commit 2620203b1a
parent 0b2af91bc6
@@ -74,6 +74,8 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4209. Clean up the addNode/addChild/addChildNoQuotaCheck methods in
     FSDirectory and INodeDirectory. (szetszwo)
 
+    HDFS-4334. Add a unique id to INode. (Brandon Li via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
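
A stand-alone sketch of the id scheme these hunks introduce, assuming nothing beyond the JDK: ids 1 to 1000 are reserved, the root inode takes 1001, and ids are handed out from an AtomicLong and never reused (see the new INodeId class further down). The class name InodeIdSketch and its main method are illustrative only, not part of the patch.

import java.util.concurrent.atomic.AtomicLong;

// Stand-alone sketch of the allocation scheme introduced by HDFS-4334.
class InodeIdSketch {
  static final long LAST_RESERVED_ID = 1000L;           // matches INodeId.LAST_RESERVED_ID
  private final AtomicLong last = new AtomicLong(LAST_RESERVED_ID);

  long allocate() {                                      // like INodeId.allocateNewInodeId()
    return last.incrementAndGet();                       // increment first, then return
  }

  public static void main(String[] args) {
    InodeIdSketch ids = new InodeIdSketch();
    System.out.println(ids.allocate());                  // 1001, the root inode
    System.out.println(ids.allocate());                  // 1002, first user-created inode
  }
}
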
@@ -76,8 +76,9 @@ import com.google.common.base.Preconditions;
 *************************************************/
 public class FSDirectory implements Closeable {
   private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
-    return new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-        namesystem.createFsOwnerPermissions(new FsPermission((short)0755)));
+    return new INodeDirectoryWithQuota(namesystem.allocateNewInodeId(),
+        INodeDirectory.ROOT_NAME,
+        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
   }
 
   INodeDirectoryWithQuota rootDir;
@@ -252,7 +253,9 @@ public class FSDirectory implements Closeable {
     if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
+    long id = namesystem.allocateNewInodeId();
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
+                                 id,
                                  permissions,replication,
                                  preferredBlockSize, modTime, clientName,
                                  clientMachine, clientNode);
@@ -274,7 +277,8 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
-  INode unprotectedAddFile( String path,
+  INode unprotectedAddFile( long id,
+                            String path,
                             PermissionStatus permissions,
                             short replication,
                             long modificationTime,
@@ -286,13 +290,11 @@ public class FSDirectory implements Closeable {
     final INode newNode;
     assert hasWriteLock();
     if (underConstruction) {
-      newNode = new INodeFileUnderConstruction(
-          permissions, replication,
-          preferredBlockSize, modificationTime, clientName,
-          clientMachine, null);
+      newNode = new INodeFileUnderConstruction(id, permissions, replication,
+          preferredBlockSize, modificationTime, clientName, clientMachine, null);
     } else {
-      newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
-          modificationTime, atime, preferredBlockSize);
+      newNode = new INodeFile(id, permissions, BlockInfo.EMPTY_ARRAY,
+          replication, modificationTime, atime, preferredBlockSize);
     }
 
     try {
@@ -1421,8 +1423,9 @@ public class FSDirectory implements Closeable {
     // create directories beginning from the first null index
     for(; i < inodes.length; i++) {
       pathbuilder.append(Path.SEPARATOR + names[i]);
-      unprotectedMkdir(inodesInPath, i, components[i],
-          (i < lastInodeIndex) ? parentPermissions : permissions, now);
+      unprotectedMkdir(namesystem.allocateNewInodeId(), inodesInPath, i,
+          components[i], (i < lastInodeIndex) ? parentPermissions
+              : permissions, now);
       if (inodes[i] == null) {
         return false;
       }
@@ -1444,7 +1447,7 @@ public class FSDirectory implements Closeable {
     return true;
   }
 
-  INode unprotectedMkdir(String src, PermissionStatus permissions,
+  INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
                           long timestamp) throws QuotaExceededException,
                           UnresolvedLinkException {
     assert hasWriteLock();
@@ -1453,7 +1456,8 @@ public class FSDirectory implements Closeable {
         components.length, false);
     INode[] inodes = inodesInPath.getINodes();
     final int pos = inodes.length - 1;
-    unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
+    unprotectedMkdir(inodeId, inodesInPath, pos, components[pos], permissions,
+        timestamp);
     return inodes[pos];
   }
 
@@ -1461,11 +1465,12 @@ public class FSDirectory implements Closeable {
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
-      byte[] name, PermissionStatus permission,
-      long timestamp) throws QuotaExceededException {
+  private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
+      int pos, byte[] name, PermissionStatus permission, long timestamp)
+      throws QuotaExceededException {
     assert hasWriteLock();
-    final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
+    final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
+        timestamp);
     if (addChild(inodesInPath, pos, dir, true)) {
       inodesInPath.setINode(pos, dir);
     }
@@ -2035,9 +2040,10 @@ public class FSDirectory implements Closeable {
     }
     final String userName = dirPerms.getUserName();
     INodeSymlink newNode = null;
+    long id = namesystem.allocateNewInodeId();
     writeLock();
     try {
-      newNode = unprotectedAddSymlink(path, target, modTime, modTime,
+      newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
           new PermissionStatus(userName, null, FsPermission.getDefault()));
     } finally {
       writeUnlock();
@@ -2057,12 +2063,13 @@ public class FSDirectory implements Closeable {
   /**
    * Add the specified path into the namespace. Invoked from edit log processing.
   */
-  INodeSymlink unprotectedAddSymlink(String path, String target, long mtime,
-                                  long atime, PermissionStatus perm)
+  INodeSymlink unprotectedAddSymlink(long id, String path, String target,
+      long mtime, long atime, PermissionStatus perm)
       throws UnresolvedLinkException, QuotaExceededException {
     assert hasWriteLock();
-    final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
-    return addINode(path, symlink)? symlink: null;
+    final INodeSymlink symlink = new INodeSymlink(id, target, mtime, atime,
+        perm);
+    return addINode(path, symlink) ? symlink : null;
   }
 
   /**
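
A minimal sketch of the pattern the FSDirectory changes follow: the caller allocates a fresh id from the namesystem before taking the write lock, then hands the pre-assigned id to the node constructor. The names IdAllocPattern, Node, and addNode below are illustrative stand-ins, not HDFS APIs.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of "allocate the id first, build the node under the write lock".
class IdAllocPattern {
  static final class Node {
    final long id;
    final String name;
    Node(long id, String name) { this.id = id; this.name = name; }
  }

  private final AtomicLong ids = new AtomicLong(1000L);
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  Node addNode(String name) {
    long id = ids.incrementAndGet();      // allocate before taking the lock
    lock.writeLock().lock();
    try {
      return new Node(id, name);          // the id is fixed at construction time
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    IdAllocPattern dir = new IdAllocPattern();
    System.out.println(dir.addNode("/a").id);   // 1001
  }
}
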
@@ -122,7 +122,8 @@ public class FSEditLogLoader {
     long lastTxId = in.getLastTxId();
     long numTxns = (lastTxId - expectedStartingTxId) + 1;
     long lastLogTime = now();
+    long lastInodeId = fsNamesys.getLastInodeId();
 
     try {
       while (true) {
         try {
@@ -168,7 +169,10 @@ public class FSEditLogLoader {
           }
         }
         try {
-          applyEditLogOp(op, fsDir, in.getVersion());
+          long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
+          if (lastInodeId < inodeId) {
+            lastInodeId = inodeId;
+          }
         } catch (Throwable e) {
           LOG.error("Encountered exception on operation " + op, e);
           MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
@@ -203,6 +207,7 @@ public class FSEditLogLoader {
         }
       }
     } finally {
+      fsNamesys.resetLastInodeId(lastInodeId);
       if(closeOnExit) {
         in.close();
       }
@@ -221,9 +226,9 @@ public class FSEditLogLoader {
   }
 
   @SuppressWarnings("deprecation")
-  private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
+  private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       int logVersion) throws IOException {
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     if (LOG.isTraceEnabled()) {
       LOG.trace("replaying edit log: " + op);
     }
@@ -253,11 +258,11 @@ public class FSEditLogLoader {
         assert addCloseOp.blocks.length == 0;
 
         // add to the file tree
-        newFile = (INodeFile)fsDir.unprotectedAddFile(
-            addCloseOp.path, addCloseOp.permissions,
-            replication, addCloseOp.mtime,
-            addCloseOp.atime, addCloseOp.blockSize,
-            true, addCloseOp.clientName, addCloseOp.clientMachine);
+        inodeId = fsNamesys.allocateNewInodeId();
+        newFile = (INodeFile) fsDir.unprotectedAddFile(inodeId,
+            addCloseOp.path, addCloseOp.permissions, replication,
+            addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
+            addCloseOp.clientName, addCloseOp.clientMachine);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
 
       } else { // This is OP_ADD on an existing file
@@ -368,7 +373,8 @@ public class FSEditLogLoader {
       }
     case OP_MKDIR: {
       MkdirOp mkdirOp = (MkdirOp)op;
-      fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
+      inodeId = fsNamesys.allocateNewInodeId();
+      fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
                              mkdirOp.timestamp);
       break;
     }
@@ -421,9 +427,10 @@ public class FSEditLogLoader {
       }
     case OP_SYMLINK: {
       SymlinkOp symlinkOp = (SymlinkOp)op;
-      fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
-                                  symlinkOp.mtime, symlinkOp.atime,
-                                  symlinkOp.permissionStatus);
+      inodeId = fsNamesys.allocateNewInodeId();
+      fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
+                                  symlinkOp.value, symlinkOp.mtime,
+                                  symlinkOp.atime, symlinkOp.permissionStatus);
       break;
     }
     case OP_RENAME: {
@@ -483,6 +490,7 @@ public class FSEditLogLoader {
     default:
       throw new IOException("Invalid operation read " + op.opCode);
     }
+    return inodeId;
   }
 
   private static String formatEditLogReplayError(EditLogInputStream in,
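
A sketch of the replay bookkeeping these hunks add: each replayed operation reports the inode id it allocated (0, the grandfather id, when it allocated none), the loader keeps the running maximum, and the namesystem counter is updated once in the finally block. The Op interface and the replay method below are illustrative, not the FSEditLogLoader API.

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of tracking the highest allocated inode id during edit-log replay.
class ReplaySketch {
  interface Op { long apply(); }                  // returns the allocated id, or 0

  static void replay(List<Op> ops, AtomicLong lastInodeId) {
    long last = lastInodeId.get();                // like fsNamesys.getLastInodeId()
    try {
      for (Op op : ops) {
        long id = op.apply();
        if (last < id) {
          last = id;                              // remember the highest id seen
        }
      }
    } finally {
      lastInodeId.set(last);                      // like fsNamesys.resetLastInodeId(lastInodeId)
    }
  }
}
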
@@ -216,7 +216,8 @@ class FSImageFormat {
       in = compression.unwrapInputStream(fin);
 
       LOG.info("Loading image file " + curFile + " using " + compression);
+      // reset INodeId. TODO: remove this after inodeId is persisted in fsimage
+      namesystem.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
       // load all inodes
       LOG.info("Number of files = " + numFiles);
       if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
@@ -384,6 +385,8 @@ class FSImageFormat {
     long blockSize = 0;
 
     int imgVersion = getLayoutVersion();
+    long inodeId = namesystem.allocateNewInodeId();
+
     short replication = in.readShort();
     replication = namesystem.getBlockManager().adjustReplication(replication);
     modificationTime = in.readLong();
@@ -421,7 +424,7 @@ class FSImageFormat {
 
     PermissionStatus permissions = PermissionStatus.read(in);
 
-    return INode.newINode(permissions, blocks, symlink, replication,
+    return INode.newINode(inodeId, permissions, blocks, symlink, replication,
         modificationTime, atime, nsQuota, dsQuota, blockSize);
   }
 
@@ -107,7 +107,9 @@ public class FSImageSerialization {
     int numLocs = in.readInt();
     assert numLocs == 0 : "Unexpected block locations";
 
-    return new INodeFileUnderConstruction(name,
+    //TODO: get inodeId from fsimage after inodeId is persisted
+    return new INodeFileUnderConstruction(INodeId.GRANDFATHER_INODE_ID,
+                                          name,
                                           blockReplication,
                                           modificationTime,
                                           preferredBlockSize,
@@ -378,6 +378,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   private final boolean haEnabled;
 
+  private INodeId inodeId;
+
+  /**
+   * Set the last allocated inode id when fsimage is loaded or editlog is
+   * applied.
+   * @throws IOException
+   */
+  public void resetLastInodeId(long newValue) throws IOException {
+    inodeId.resetLastInodeId(newValue);
+  }
+
+  /** Should only be used for tests to reset to any value */
+  void resetLastInodeIdWithoutChecking(long newValue) {
+    inodeId.resetLastInodeIdWithoutChecking(newValue);
+  }
+
+  public long getLastInodeId() {
+    return inodeId.getLastInodeId();
+  }
+
+  public long allocateNewInodeId() {
+    return inodeId.allocateNewInodeId();
+  }
+
   /**
    * Clear all loaded data
    */
@@ -386,6 +410,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     dtSecretManager.reset();
     generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
     leaseManager.removeAllLeases();
+    inodeId.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
   }
 
   @VisibleForTesting
@@ -561,6 +586,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.standbyShouldCheckpoint = conf.getBoolean(
           DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
 
+      this.inodeId = new INodeId();
+
       // For testing purposes, allow the DT secret manager to be started regardless
       // of whether security is enabled.
       alwaysUseDelegationTokensForTests = conf.getBoolean(
@@ -1895,6 +1922,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
       boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
+        file.getId(),
         file.getLocalNameBytes(),
         file.getBlockReplication(),
         file.getModificationTime(),
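
A compressed lifecycle sketch of the counter FSNamesystem now owns: it is created in the constructor, wound back without checking before an fsimage is loaded, advanced to the highest id seen while replaying edits, and returned to the reserved value by clear(). This is a simplified stand-alone mock (the real resetLastInodeId rejects smaller values rather than taking a maximum), and all names are illustrative.

import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-alone mock of the id-counter lifecycle inside FSNamesystem.
class NamesystemIdLifecycleSketch {
  static final long LAST_RESERVED_ID = 1000L;
  private final AtomicLong inodeId = new AtomicLong(LAST_RESERVED_ID); // constructor: new INodeId()

  void beforeImageLoad() { inodeId.set(LAST_RESERVED_ID); }            // resetLastInodeIdWithoutChecking
  void afterEditReplay(long highestIdSeen) {                           // simplified resetLastInodeId
    inodeId.updateAndGet(cur -> Math.max(cur, highestIdSeen));
  }
  void clear() { inodeId.set(LAST_RESERVED_ID); }                      // clear() wipes loaded state
  long allocateNewInodeId() { return inodeId.incrementAndGet(); }
  long getLastInodeId() { return inodeId.get(); }
}
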
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
@@ -101,6 +102,11 @@ abstract class INode implements Comparable<byte[]> {
     }
   }
 
+  /**
+   * The inode id
+   */
+  final private long id;
+
   /**
    * The inode name is in java UTF8 encoding;
    * The name in HdfsFileStatus should keep the same encoding as this.
@@ -120,8 +126,9 @@ abstract class INode implements Comparable<byte[]> {
   protected long modificationTime = 0L;
   protected long accessTime = 0L;
 
-  private INode(byte[] name, long permission, INodeDirectory parent,
+  private INode(long id, byte[] name, long permission, INodeDirectory parent,
       long modificationTime, long accessTime) {
+    this.id = id;
     this.name = name;
     this.permission = permission;
     this.parent = parent;
@@ -129,26 +136,31 @@ abstract class INode implements Comparable<byte[]> {
     this.accessTime = accessTime;
   }
 
-  INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
-      long modificationTime, long accessTime) {
-    this(name, PermissionStatusFormat.toLong(permissions), parent,
+  INode(long id, byte[] name, PermissionStatus permissions,
+      INodeDirectory parent, long modificationTime, long accessTime) {
+    this(id, name, PermissionStatusFormat.toLong(permissions), parent,
         modificationTime, accessTime);
   }
 
-  INode(PermissionStatus permissions, long mtime, long atime) {
-    this(null, permissions, null, mtime, atime);
+  INode(long id, PermissionStatus permissions, long mtime, long atime) {
+    this(id, null, PermissionStatusFormat.toLong(permissions), null, mtime, atime);
   }
 
-  protected INode(String name, PermissionStatus permissions) {
-    this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
+  protected INode(long id, String name, PermissionStatus permissions) {
+    this(id, DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
   }
 
   /** @param other Other node to be copied */
   INode(INode other) {
-    this(other.getLocalNameBytes(), other.permission, other.getParent(),
-        other.getModificationTime(), other.getAccessTime());
+    this(other.getId(), other.getLocalNameBytes(), other.permission, other
+        .getParent(), other.getModificationTime(), other.getAccessTime());
   }
 
+  /** Get inode id */
+  public long getId() {
+    return this.id;
+  }
+
   /**
    * Check whether this is the root inode.
    */
@@ -463,6 +475,7 @@ abstract class INode implements Comparable<byte[]> {
   /**
   * Create an INode; the inode's name is not set yet
   *
+   * @param id preassigned inode id
   * @param permissions permissions
   * @param blocks blocks if a file
   * @param symlink symblic link if a symbolic link
@@ -474,7 +487,8 @@ abstract class INode implements Comparable<byte[]> {
   * @param preferredBlockSize block size
   * @return an inode
   */
-  static INode newINode(PermissionStatus permissions,
+  static INode newINode(long id,
+                        PermissionStatus permissions,
                         BlockInfo[] blocks,
                         String symlink,
                         short replication,
@@ -484,17 +498,17 @@ abstract class INode implements Comparable<byte[]> {
                         long dsQuota,
                         long preferredBlockSize) {
    if (symlink.length() != 0) { // check if symbolic link
-      return new INodeSymlink(symlink, modificationTime, atime, permissions);
+      return new INodeSymlink(id, symlink, modificationTime, atime, permissions);
     } else if (blocks == null) { //not sym link and blocks null? directory!
       if (nsQuota >= 0 || dsQuota >= 0) {
         return new INodeDirectoryWithQuota(
-            permissions, modificationTime, nsQuota, dsQuota);
+            id, permissions, modificationTime, nsQuota, dsQuota);
       }
       // regular directory
-      return new INodeDirectory(permissions, modificationTime);
+      return new INodeDirectory(id, permissions, modificationTime);
     }
     // file
-    return new INodeFile(permissions, blocks, replication,
+    return new INodeFile(id, permissions, blocks, replication,
         modificationTime, atime, preferredBlockSize);
   }
 
@@ -53,17 +53,17 @@ class INodeDirectory extends INode {
 
   private List<INode> children = null;
 
-  INodeDirectory(String name, PermissionStatus permissions) {
-    super(name, permissions);
+  INodeDirectory(long id, String name, PermissionStatus permissions) {
+    super(id, name, permissions);
   }
 
-  public INodeDirectory(PermissionStatus permissions, long mTime) {
-    super(permissions, mTime, 0);
+  public INodeDirectory(long id, PermissionStatus permissions, long mTime) {
+    super(id, permissions, mTime, 0);
   }
 
   /** constructor */
-  INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
-    super(name, permissions, null, mtime, 0L);
+  INodeDirectory(long id, byte[] name, PermissionStatus permissions, long mtime) {
+    super(id, name, permissions, null, mtime, 0L);
   }
 
   /** copy constructor
@@ -54,16 +54,16 @@ class INodeDirectoryWithQuota extends INodeDirectory {
   }
 
   /** constructor with no quota verification */
-  INodeDirectoryWithQuota(PermissionStatus permissions, long modificationTime,
-      long nsQuota, long dsQuota) {
-    super(permissions, modificationTime);
+  INodeDirectoryWithQuota(long id, PermissionStatus permissions,
+      long modificationTime, long nsQuota, long dsQuota) {
+    super(id, permissions, modificationTime);
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
   }
 
   /** constructor with no quota verification */
-  INodeDirectoryWithQuota(String name, PermissionStatus permissions) {
-    super(name, permissions);
+  INodeDirectoryWithQuota(long id, String name, PermissionStatus permissions) {
+    super(id, name, permissions);
   }
 
   /** Get this directory's namespace quota
@@ -86,15 +86,15 @@ class INodeFile extends INode implements BlockCollection {
 
   private BlockInfo[] blocks;
 
-  INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
-            short replication, long modificationTime,
-            long atime, long preferredBlockSize) {
-    super(permissions, modificationTime, atime);
+  INodeFile(long id, PermissionStatus permissions, BlockInfo[] blklist,
+      short replication, long modificationTime, long atime,
+      long preferredBlockSize) {
+    super(id, permissions, modificationTime, atime);
     header = HeaderFormat.combineReplication(header, replication);
     header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
     this.blocks = blklist;
   }
 
   /** @return true unconditionally. */
   @Override
   public final boolean isFile() {
@@ -49,21 +49,23 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
 
-  INodeFileUnderConstruction(PermissionStatus permissions,
+  INodeFileUnderConstruction(long id,
+                             PermissionStatus permissions,
                              short replication,
                              long preferredBlockSize,
                              long modTime,
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
-        modTime, modTime, preferredBlockSize);
+    super(id, permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY,
+        replication, modTime, modTime, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
     this.clientNode = clientNode;
   }
 
-  INodeFileUnderConstruction(byte[] name,
+  INodeFileUnderConstruction(long id,
+                             byte[] name,
                              short blockReplication,
                              long modificationTime,
                              long preferredBlockSize,
@@ -72,8 +74,8 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(perm, blocks, blockReplication, modificationTime, modificationTime,
-          preferredBlockSize);
+    super(id, perm, blocks, blockReplication, modificationTime,
+          modificationTime, preferredBlockSize);
     setLocalName(name);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
@@ -112,7 +114,8 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     assert allBlocksComplete() : "Can't finalize inode " + this
       + " since it contains non-complete blocks! Blocks are "
       + Arrays.asList(getBlocks());
-    INodeFile obj = new INodeFile(getPermissionStatus(),
+    INodeFile obj = new INodeFile(getId(),
+                                  getPermissionStatus(),
                                   getBlocks(),
                                   getBlockReplication(),
                                   getModificationTime(),
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * An id which uniquely identifies an inode
+ */
+@InterfaceAudience.Private
+class INodeId implements Comparable<INodeId> {
+  /**
+   * The last reserved inode id. Reserve id 1 to 1000 for potential future
+   * usage. The id won't be recycled and is not expected to wrap around in a
+   * very long time. Root inode id will be 1001.
+   */
+  public static final long LAST_RESERVED_ID = 1000L;
+
+  /**
+   * The inode id validation of lease check will be skipped when the request
+   * uses GRANDFATHER_INODE_ID for backward compatibility.
+   */
+  public static final long GRANDFATHER_INODE_ID = 0;
+
+  private AtomicLong lastInodeId = new AtomicLong();
+
+  /**
+   * Create a new instance, initialized to LAST_RESERVED_ID.
+   */
+  INodeId() {
+    lastInodeId.set(INodeId.LAST_RESERVED_ID);
+  }
+
+  /**
+   * Set the last allocated inode id when fsimage is loaded or editlog is
+   * applied.
+   * @throws IOException
+   */
+  void resetLastInodeId(long newValue) throws IOException {
+    if (newValue < getLastInodeId()) {
+      throw new IOException(
+          "Can't reset lastInodeId to be less than its current value "
+              + getLastInodeId() + ", newValue=" + newValue);
+    }
+
+    lastInodeId.set(newValue);
+  }
+
+  void resetLastInodeIdWithoutChecking(long newValue) {
+    lastInodeId.set(newValue);
+  }
+
+  long getLastInodeId() {
+    return lastInodeId.get();
+  }
+
+  /**
+   * First increment the counter and then get the id.
+   */
+  long allocateNewInodeId() {
+    return lastInodeId.incrementAndGet();
+  }
+
+  @Override
+  // Comparable
+  public int compareTo(INodeId that) {
+    long id1 = this.getLastInodeId();
+    long id2 = that.getLastInodeId();
+    return id1 < id2 ? -1 : id1 > id2 ? 1 : 0;
+  }
+
+  @Override
+  // Object
+  public boolean equals(Object o) {
+    if (!(o instanceof INodeId)) {
+      return false;
+    }
+    return compareTo((INodeId) o) == 0;
+  }
+
+  @Override
+  // Object
+  public int hashCode() {
+    long id = getLastInodeId();
+    return (int) (id ^ (id >>> 32));
+  }
+}
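
A behavioural sketch of the reset semantics of the class added above: resetLastInodeId() refuses to move the counter backwards, while resetLastInodeIdWithoutChecking() (used before an old fsimage without persisted ids is loaded) does not. This is a stand-alone re-implementation for illustration; it is not the HDFS class.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Stand-alone illustration of the forward-only reset guard.
class ResetSemanticsSketch {
  private final AtomicLong last = new AtomicLong(1000L);

  void reset(long newValue) throws IOException {
    if (newValue < last.get()) {
      throw new IOException("Can't reset lastInodeId to a smaller value");
    }
    last.set(newValue);
  }

  void resetWithoutChecking(long newValue) { last.set(newValue); }

  public static void main(String[] args) throws IOException {
    ResetSemanticsSketch ids = new ResetSemanticsSketch();
    ids.reset(2000L);                 // fine: the counter moves forward
    ids.resetWithoutChecking(1000L);  // allowed: e.g. before replaying an image
    try {
      ids.reset(500L);                // rejected: would move the counter backwards
    } catch (IOException expected) {
      System.out.println("rejected as expected");
    }
  }
}
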
@@ -28,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 public class INodeSymlink extends INode {
   private final byte[] symlink; // The target URI
 
-  INodeSymlink(String value, long mtime, long atime,
+  INodeSymlink(long id, String value, long mtime, long atime,
                PermissionStatus permissions) {
-    super(permissions, mtime, atime);
+    super(id, permissions, mtime, atime);
     this.symlink = DFSUtil.string2Bytes(value);
   }
 
@@ -62,7 +62,8 @@ public class CreateEditsLog {
 
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                       new FsPermission((short)0777));
-    INodeDirectory dirInode = new INodeDirectory(p, 0L);
+    INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        p, 0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -81,8 +82,9 @@ public class CreateEditsLog {
       }
 
       INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-          null, replication, 0, blockSize, blocks, p, "", "", null);
-      // Append path to filename with information about blockIDs
+          INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
+          blocks, p, "", "", null);
+      // Append path to filename with information about blockIDs
       String path = "_" + iF + "_B" + blocks[0].getBlockId() +
                     "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
       String filePath = nameGenerator.getNextFileName("");
@@ -90,12 +92,12 @@ public class CreateEditsLog {
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(p, 0L);
+        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath,
-          new INodeFileUnderConstruction(
-              p, replication, 0, blockSize, "", "", null));
+      editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
+          INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
+          null));
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
@@ -208,7 +208,7 @@ public abstract class FSImageTestUtil {
   * only a specified number of "mkdirs" operations.
   */
   public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
-      long firstTxId) throws IOException {
+      long firstTxId, long newInodeId) throws IOException {
     FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
     editLog.setNextTxId(firstTxId);
     editLog.openForWrite();
@@ -217,7 +217,7 @@ public abstract class FSImageTestUtil {
         FsPermission.createImmutable((short)0755));
     for (int i = 1; i <= numDirs; i++) {
       String dirName = "dir" + i;
-      INodeDirectory dir = new INodeDirectory(dirName, perms);
+      INodeDirectory dir = new INodeDirectory(newInodeId + i -1, dirName, perms);
       editLog.logMkDir("/" + dirName, dir);
     }
     editLog.logSync();
@@ -153,7 +153,8 @@ public class TestEditLog {
 
       for (int i = 0; i < numTransactions; i++) {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-            p, replication, blockSize, 0, "", "", null);
+            namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
+            "", null);
         editLog.logOpenFile("/filename" + (startIndex + i), inode);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
         editLog.logSync();
@@ -318,6 +319,11 @@ public class TestEditLog {
     // we should now be writing to edits_inprogress_3
     fsimage.rollEditLog();
 
+    // Remember the current lastInodeId and will reset it back to test
+    // loading editlog segments.The transactions in the following allocate new
+    // inode id to write to editlogs but doesn't create ionde in namespace
+    long originalLastInodeId = namesystem.getLastInodeId();
+
     // Create threads and make them run transactions concurrently.
     Thread threadId[] = new Thread[NUM_THREADS];
     for (int i = 0; i < NUM_THREADS; i++) {
@@ -350,6 +356,7 @@ public class TestEditLog {
     // If there were any corruptions, it is likely that the reading in
     // of these transactions will throw an exception.
     //
+    namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
     for (Iterator<StorageDirectory> it =
             fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
@@ -73,7 +73,8 @@ public class TestFsLimits {
         fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                            "namenode")).toString());
 
-    rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms);
+    rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
+        .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
     inodes = new INode[]{ rootInode, null };
     fs = null;
     fsIsReady = true;
@@ -152,7 +153,8 @@ public class TestFsLimits {
     // have to create after the caller has had a chance to set conf values
     if (fs == null) fs = new MockFSDirectory();
 
-    INode child = new INodeDirectory(name, perms);
+    INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+        name, perms);
     child.setLocalName(name);
 
     Class<?> generated = null;
@@ -25,10 +25,14 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Test;
 
@@ -49,9 +53,9 @@ public class TestINodeFile {
   public void testReplication () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
                  inf.getBlockReplication());
   }
@@ -66,9 +70,9 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   /**
@@ -79,20 +83,20 @@ public class TestINodeFile {
   public void testPreferredBlockSize () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", preferredBlockSize,
         inf.getPreferredBlockSize());
   }
 
   @Test
   public void testPreferredBlockSizeUpperBound () {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
                  inf.getPreferredBlockSize());
   }
@@ -107,9 +111,9 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   /**
@@ -122,10 +126,10 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   @Test
   public void testGetFullPathName() {
@@ -134,12 +138,14 @@ public class TestINodeFile {
 
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(perms, null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
+        replication, 0L, 0L, preferredBlockSize);
     inf.setLocalName("f");
 
-    INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
-    INodeDirectory dir = new INodeDirectory("d", perms);
+    INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        INodeDirectory.ROOT_NAME, perms);
+    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
+        perms);
 
     assertEquals("f", inf.getFullPathName());
     assertEquals("", inf.getLocalParentDir());
@@ -195,7 +201,7 @@ public class TestINodeFile {
     for (int i = 0; i < nCount; i++) {
       PermissionStatus perms = new PermissionStatus(userName, null,
          FsPermission.getDefault());
-      iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+      iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
          preferredBlockSize);
       iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
       BlockInfo newblock = new BlockInfo(replication);
@@ -246,10 +252,10 @@ public class TestINodeFile {
     }
 
     {//cast from INodeFile
-      final INode from = new INodeFile(
-          perm, null, replication, 0L, 0L, preferredBlockSize);
+      final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
+          null, replication, 0L, 0L, preferredBlockSize);
 
      //cast to INodeFile, should success
      final INodeFile f = INodeFile.valueOf(from, path);
      assertTrue(f == from);
 
@@ -271,8 +277,9 @@ public class TestINodeFile {
 
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFileUnderConstruction(
-          perm, replication, 0L, 0L, "client", "machine", null);
+          INodeId.GRANDFATHER_INODE_ID, perm, replication, 0L, 0L, "client",
+          "machine", null);
 
      //cast to INodeFile, should success
      final INodeFile f = INodeFile.valueOf(from, path);
      assertTrue(f == from);
@@ -291,7 +298,8 @@ public class TestINodeFile {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(perm, 0L);
+      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
+          0L);
 
      //cast to INodeFile, should fail
      try {
@@ -314,4 +322,47 @@ public class TestINodeFile {
       assertTrue(d == from);
     }
   }
+
+  /**
+   * Verify root always has inode id 1001 and new formated fsimage has last
+   * allocated inode id 1000. Validate correct lastInodeId is persisted.
+   * @throws IOException
+   */
+  @Test
+  public void TestInodeId() throws IOException {
+
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+
+    FSNamesystem fsn = cluster.getNamesystem();
+    long lastId = fsn.getLastInodeId();
+
+    assertTrue(lastId == 1001);
+
+    // Create one directory and the last inode id should increase to 1002
+    FileSystem fs = cluster.getFileSystem();
+    Path path = new Path("/test1");
+    assertTrue(fs.mkdirs(path));
+    assertTrue(fsn.getLastInodeId() == 1002);
+
+    Path filePath = new Path("/test1/file");
+    fs.create(filePath);
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    // Rename doesn't increase inode id
+    Path renamedPath = new Path("/test2");
+    fs.rename(path, renamedPath);
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    cluster.restartNameNode();
+    cluster.waitActive();
+    // Make sure empty editlog can be handled
+    cluster.restartNameNode();
+    cluster.waitActive();
+    assertTrue(fsn.getLastInodeId() == 1003);
+  }
 }
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -139,7 +140,9 @@ public class TestEditLogsDuringFailover {
     // Create a fake in-progress edit-log in the shared directory
     URI sharedUri = cluster.getSharedEditsDir(0, 1);
     File sharedDir = new File(sharedUri.getPath(), "current");
-    FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1);
+    FSNamesystem fsn = cluster.getNamesystem(0);
+    FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1,
+        fsn.getLastInodeId() + 1);
 
     assertEditFiles(Collections.singletonList(sharedUri),
         NNStorage.getInProgressEditsFileName(1));
@@ -212,8 +212,9 @@ public class TestStandbyCheckpoints {
     File sharedDir = new File(sharedUri.getPath(), "current");
     File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
         "testCheckpointCancellation-tmp");
-    FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG,
-        3);
+    FSNamesystem fsn = cluster.getNamesystem(0);
+    FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
+        fsn.getLastInodeId() + 1);
     String fname = NNStorage.getInProgressEditsFileName(3);
     new File(tmpDir, fname).renameTo(new File(sharedDir, fname));
 