diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8327e7a8edf..ab1f494f0b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -74,6 +74,8 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4209. Clean up the addNode/addChild/addChildNoQuotaCheck methods in
     FSDirectory and INodeDirectory. (szetszwo)
 
+    HDFS-4334. Add a unique id to INode. (Brandon Li via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 6eaa69f29d0..b2cb6350916 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -76,8 +76,9 @@ import com.google.common.base.Preconditions;
  *************************************************/
 public class FSDirectory implements Closeable {
   private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
-    return new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-        namesystem.createFsOwnerPermissions(new FsPermission((short)0755)));
+    return new INodeDirectoryWithQuota(namesystem.allocateNewInodeId(),
+        INodeDirectory.ROOT_NAME,
+        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
   }
 
   INodeDirectoryWithQuota rootDir;
@@ -252,7 +253,9 @@ public class FSDirectory implements Closeable {
     if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
+    long id = namesystem.allocateNewInodeId();
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
+                                 id,
                                  permissions,replication,
                                  preferredBlockSize, modTime, clientName, 
                                  clientMachine, clientNode);
@@ -274,7 +277,8 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
-  INode unprotectedAddFile( String path, 
+  INode unprotectedAddFile( long id,
+                            String path, 
                             PermissionStatus permissions,
                             short replication,
                             long modificationTime,
@@ -286,13 +290,11 @@ public class FSDirectory implements Closeable {
     final INode newNode;
     assert hasWriteLock();
     if (underConstruction) {
-      newNode = new INodeFileUnderConstruction(
-          permissions, replication,
-          preferredBlockSize, modificationTime, clientName, 
-          clientMachine, null);
+      newNode = new INodeFileUnderConstruction(id, permissions, replication,
+          preferredBlockSize, modificationTime, clientName, clientMachine, null);
     } else {
-      newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
-                              modificationTime, atime, preferredBlockSize);
+      newNode = new INodeFile(id, permissions, BlockInfo.EMPTY_ARRAY,
+          replication, modificationTime, atime, preferredBlockSize);
     }
 
     try {
@@ -1421,8 +1423,9 @@ public class FSDirectory implements Closeable {
     // create directories beginning from the first null index
     for(; i < inodes.length; i++) {
       pathbuilder.append(Path.SEPARATOR + names[i]);
-      unprotectedMkdir(inodesInPath, i, components[i],
-          (i < lastInodeIndex) ? parentPermissions : permissions, now);
+      unprotectedMkdir(namesystem.allocateNewInodeId(), inodesInPath, i,
+          components[i], (i < lastInodeIndex) ? parentPermissions
+              : permissions, now);
       if (inodes[i] == null) {
         return false;
       }
@@ -1444,7 +1447,7 @@ public class FSDirectory implements Closeable {
     return true;
   }
 
-  INode unprotectedMkdir(String src, PermissionStatus permissions,
+  INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
                           long timestamp) throws QuotaExceededException,
                           UnresolvedLinkException {
     assert hasWriteLock();
@@ -1453,7 +1456,8 @@ public class FSDirectory implements Closeable {
         components.length, false);
     INode[] inodes = inodesInPath.getINodes();
     final int pos = inodes.length - 1;
-    unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
+    unprotectedMkdir(inodeId, inodesInPath, pos, components[pos], permissions,
+        timestamp);
     return inodes[pos];
   }
 
@@ -1461,11 +1465,12 @@ public class FSDirectory implements Closeable {
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
-      byte[] name, PermissionStatus permission,
-      long timestamp) throws QuotaExceededException {
+  private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
+      int pos, byte[] name, PermissionStatus permission, long timestamp)
+      throws QuotaExceededException {
     assert hasWriteLock();
-    final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
+    final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
+        timestamp);
     if (addChild(inodesInPath, pos, dir, true)) {
       inodesInPath.setINode(pos, dir);
     }
@@ -2035,9 +2040,10 @@ public class FSDirectory implements Closeable {
     }
     final String userName = dirPerms.getUserName();
     INodeSymlink newNode = null;
+    long id = namesystem.allocateNewInodeId();
     writeLock();
     try {
-      newNode = unprotectedAddSymlink(path, target, modTime, modTime,
+      newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
           new PermissionStatus(userName, null, FsPermission.getDefault()));
     } finally {
       writeUnlock();
@@ -2057,12 +2063,13 @@ public class FSDirectory implements Closeable {
   /**
    * Add the specified path into the namespace. Invoked from edit log processing.
    */
-  INodeSymlink unprotectedAddSymlink(String path, String target, long mtime, 
-      long atime, PermissionStatus perm) 
+  INodeSymlink unprotectedAddSymlink(long id, String path, String target,
+      long mtime, long atime, PermissionStatus perm)
       throws UnresolvedLinkException, QuotaExceededException {
     assert hasWriteLock();
-    final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
-    return addINode(path, symlink)? symlink: null;
+    final INodeSymlink symlink = new INodeSymlink(id, target, mtime, atime,
+        perm);
+    return addINode(path, symlink) ? symlink : null;
   }
 
   /**
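Every creation path in FSDirectory now follows the same discipline: ask the namesystem for a fresh id first, then construct the inode with that id, so the identical id can be recorded for edit-log replay. A standalone sketch of that allocate-then-construct pattern (the names below are illustrative, not the HDFS API):

    import java.util.concurrent.atomic.AtomicLong;

    // Minimal sketch of allocate-then-construct, assuming a single counter
    // seeded at LAST_RESERVED_ID (1000). An inode's id is set exactly once.
    class AllocateThenConstructSketch {
      static final AtomicLong lastInodeId = new AtomicLong(1000L);

      static final class Inode {
        final long id;       // immutable after construction
        final String name;
        Inode(long id, String name) { this.id = id; this.name = name; }
      }

      static Inode create(String name) {
        long id = lastInodeId.incrementAndGet(); // allocate first
        return new Inode(id, name);              // then construct with it
      }

      public static void main(String[] args) {
        System.out.println(create("/").id);      // 1001, like the HDFS root
        System.out.println(create("/a").id);     // 1002
      }
    }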
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2c82ef2f10c..c3a9cb694c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -122,7 +122,8 @@ public class FSEditLogLoader {
     long lastTxId = in.getLastTxId();
     long numTxns = (lastTxId - expectedStartingTxId) + 1;
     long lastLogTime = now();
-
+    long lastInodeId = fsNamesys.getLastInodeId();
+
     try {
       while (true) {
         try {
@@ -168,7 +169,10 @@ public class FSEditLogLoader {
             }
           }
           try {
-            applyEditLogOp(op, fsDir, in.getVersion());
+            long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
+            if (lastInodeId < inodeId) {
+              lastInodeId = inodeId;
+            }
           } catch (Throwable e) {
             LOG.error("Encountered exception on operation " + op, e);
             MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
@@ -203,6 +207,7 @@ public class FSEditLogLoader {
         }
       }
     } finally {
+      fsNamesys.resetLastInodeId(lastInodeId);
       if(closeOnExit) {
         in.close();
       }
@@ -221,9 +226,9 @@ public class FSEditLogLoader {
   }
 
   @SuppressWarnings("deprecation")
-  private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
+  private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       int logVersion) throws IOException {
-
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     if (LOG.isTraceEnabled()) {
       LOG.trace("replaying edit log: " + op);
     }
@@ -253,11 +258,11 @@ public class FSEditLogLoader {
         assert addCloseOp.blocks.length == 0;
 
         // add to the file tree
-        newFile = (INodeFile)fsDir.unprotectedAddFile(
-            addCloseOp.path, addCloseOp.permissions,
-            replication, addCloseOp.mtime,
-            addCloseOp.atime, addCloseOp.blockSize,
-            true, addCloseOp.clientName, addCloseOp.clientMachine);
+        inodeId = fsNamesys.allocateNewInodeId();
+        newFile = (INodeFile) fsDir.unprotectedAddFile(inodeId,
+            addCloseOp.path, addCloseOp.permissions, replication,
+            addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
+            addCloseOp.clientName, addCloseOp.clientMachine);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
       } else { // This is OP_ADD on an existing file
@@ -368,7 +373,8 @@ public class FSEditLogLoader {
     }
     case OP_MKDIR: {
       MkdirOp mkdirOp = (MkdirOp)op;
-      fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
+      inodeId = fsNamesys.allocateNewInodeId();
+      fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
                              mkdirOp.timestamp);
       break;
     }
@@ -421,9 +427,10 @@ public class FSEditLogLoader {
     }
     case OP_SYMLINK: {
       SymlinkOp symlinkOp = (SymlinkOp)op;
-      fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
-                                  symlinkOp.mtime, symlinkOp.atime,
-                                  symlinkOp.permissionStatus);
+      inodeId = fsNamesys.allocateNewInodeId();
+      fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
+                                  symlinkOp.value, symlinkOp.mtime,
+                                  symlinkOp.atime, symlinkOp.permissionStatus);
       break;
     }
     case OP_RENAME: {
@@ -483,6 +490,7 @@ public class FSEditLogLoader {
     default:
       throw new IOException("Invalid operation read " + op.opCode);
     }
+    return inodeId;
   }
 
   private static String formatEditLogReplayError(EditLogInputStream in,
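applyEditLogOp now reports the id each replayed operation consumed (GRANDFATHER_INODE_ID when none was allocated), and the loader keeps a running maximum that it writes back in the finally block, so the counter never lags the namespace after a restart. A compact sketch of that fold, with illustrative names:

    // Sketch of the watermark fold performed during replay; idsFromOps
    // stands in for the per-op return values of applyEditLogOp().
    class ReplayWatermarkSketch {
      public static void main(String[] args) {
        long lastInodeId = 1000L;                  // from getLastInodeId()
        long[] idsFromOps = {1001L, 0L /* GRANDFATHER */, 1002L};
        for (long id : idsFromOps) {
          if (lastInodeId < id) {
            lastInodeId = id;                      // only ever moves forward
          }
        }
        System.out.println(lastInodeId);           // 1002 -> resetLastInodeId()
      }
    }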
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 7cc3a765f4a..02d7d4400c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -216,7 +216,8 @@ class FSImageFormat {
         in = compression.unwrapInputStream(fin);
 
         LOG.info("Loading image file " + curFile + " using " + compression);
-
+        // reset INodeId. TODO: remove this after inodeId is persisted in fsimage
+        namesystem.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
         // load all inodes
         LOG.info("Number of files = " + numFiles);
         if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
@@ -384,6 +385,8 @@ class FSImageFormat {
     long blockSize = 0;
 
     int imgVersion = getLayoutVersion();
+    long inodeId = namesystem.allocateNewInodeId();
+
     short replication = in.readShort();
     replication = namesystem.getBlockManager().adjustReplication(replication);
     modificationTime = in.readLong();
@@ -421,7 +424,7 @@ class FSImageFormat {
 
     PermissionStatus permissions = PermissionStatus.read(in);
 
-    return INode.newINode(permissions, blocks, symlink, replication,
+    return INode.newINode(inodeId, permissions, blocks, symlink, replication,
         modificationTime, atime, nsQuota, dsQuota, blockSize);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 5649833db7a..200313ed49f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -107,7 +107,9 @@ public class FSImageSerialization {
     int numLocs = in.readInt();
     assert numLocs == 0 : "Unexpected block locations";
 
-    return new INodeFileUnderConstruction(name, 
+    //TODO: get inodeId from fsimage after inodeId is persisted
+    return new INodeFileUnderConstruction(INodeId.GRANDFATHER_INODE_ID,
+                                          name,
                                           blockReplication, 
                                           modificationTime,
                                           preferredBlockSize,
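Because the current image layout does not yet persist ids, FSImageFormat resets the counter and hands out fresh ids while loading, whereas records deserialized here fall back to the GRANDFATHER_INODE_ID placeholder (0), which also tells id-based validation to skip the check. A simplified sketch of that load-time convention (names are illustrative):

    // Sketch: on image load, reset the counter, then allocate one fresh id
    // per inode read; records that carry no id use the 0 placeholder.
    class ImageLoadIdSketch {
      static final long LAST_RESERVED_ID = 1000L;
      static final long GRANDFATHER_INODE_ID = 0L;
      private long counter = LAST_RESERVED_ID;      // resetLastInodeIdWithoutChecking

      long idForLoadedInode() { return ++counter; } // one per loadINode()

      public static void main(String[] args) {
        ImageLoadIdSketch s = new ImageLoadIdSketch();
        System.out.println(s.idForLoadedInode());   // 1001 (the root)
        System.out.println(GRANDFATHER_INODE_ID);   // 0 for legacy records
      }
    }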
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 32400f682be..783ca31396f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -378,6 +378,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   private final boolean haEnabled;
 
+  private INodeId inodeId;
+
+  /**
+   * Set the last allocated inode id when fsimage is loaded or editlog is
+   * applied. 
+   * @throws IOException
+   */
+  public void resetLastInodeId(long newValue) throws IOException {
+    inodeId.resetLastInodeId(newValue);
+  }
+
+  /** Should only be used for tests to reset to any value */
+  void resetLastInodeIdWithoutChecking(long newValue) {
+    inodeId.resetLastInodeIdWithoutChecking(newValue);
+  }
+
+  public long getLastInodeId() {
+    return inodeId.getLastInodeId();
+  }
+
+  public long allocateNewInodeId() {
+    return inodeId.allocateNewInodeId();
+  }
+
   /**
    * Clear all loaded data
    */
@@ -386,6 +410,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     dtSecretManager.reset();
     generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
     leaseManager.removeAllLeases();
+    inodeId.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
   }
 
   @VisibleForTesting
@@ -561,6 +586,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.standbyShouldCheckpoint = conf.getBoolean(
           DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
 
+      this.inodeId = new INodeId();
+
       // For testing purposes, allow the DT secret manager to be started regardless
       // of whether security is enabled.
       alwaysUseDelegationTokensForTests = conf.getBoolean(
@@ -1895,6 +1922,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
       boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
+                                    file.getId(),
                                     file.getLocalNameBytes(),
                                     file.getBlockReplication(),
                                     file.getModificationTime(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 6c6b388421f..b407a62da97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
@@ -101,6 +102,11 @@ abstract class INode implements Comparable<byte[]> {
     }
   }
 
+  /**
+   * The inode id
+   */
+  final private long id;
+
   /**
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.
@@ -120,8 +126,9 @@ abstract class INode implements Comparable<byte[]> {
   protected long modificationTime = 0L;
   protected long accessTime = 0L;
 
-  private INode(byte[] name, long permission, INodeDirectory parent,
+  private INode(long id, byte[] name, long permission, INodeDirectory parent,
       long modificationTime, long accessTime) {
+    this.id = id;
     this.name = name;
     this.permission = permission;
     this.parent = parent;
@@ -129,26 +136,31 @@ abstract class INode implements Comparable<byte[]> {
     this.accessTime = accessTime;
   }
 
-  INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
-      long modificationTime, long accessTime) {
-    this(name, PermissionStatusFormat.toLong(permissions), parent,
+  INode(long id, byte[] name, PermissionStatus permissions,
+      INodeDirectory parent, long modificationTime, long accessTime) {
+    this(id, name, PermissionStatusFormat.toLong(permissions), parent,
         modificationTime, accessTime);
   }
-
-  INode(PermissionStatus permissions, long mtime, long atime) {
-    this(null, permissions, null, mtime, atime);
+
+  INode(long id, PermissionStatus permissions, long mtime, long atime) {
+    this(id, null, PermissionStatusFormat.toLong(permissions), null, mtime, atime);
   }
-
-  protected INode(String name, PermissionStatus permissions) {
-    this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
+
+  protected INode(long id, String name, PermissionStatus permissions) {
+    this(id, DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
   }
 
   /** @param other Other node to be copied */
   INode(INode other) {
-    this(other.getLocalNameBytes(), other.permission, other.getParent(), 
-        other.getModificationTime(), other.getAccessTime());
+    this(other.getId(), other.getLocalNameBytes(), other.permission, other
+        .getParent(), other.getModificationTime(), other.getAccessTime());
   }
 
+  /** Get inode id */
+  public long getId() {
+    return this.id;
+  }
+
   /**
    * Check whether this is the root inode.
    */
@@ -463,6 +475,7 @@ abstract class INode implements Comparable<byte[]> {
   /**
    * Create an INode; the inode's name is not set yet
    * 
+   * @param id preassigned inode id
    * @param permissions permissions
    * @param blocks blocks if a file
    * @param symlink symblic link if a symbolic link
@@ -474,7 +487,8 @@ abstract class INode implements Comparable<byte[]> {
    * @param preferredBlockSize block size
    * @return an inode
    */
-  static INode newINode(PermissionStatus permissions,
+  static INode newINode(long id,
+                        PermissionStatus permissions,
                         BlockInfo[] blocks,
                         String symlink,
                         short replication,
@@ -484,17 +498,17 @@ abstract class INode implements Comparable<byte[]> {
                         long dsQuota,
                         long preferredBlockSize) {
     if (symlink.length() != 0) { // check if symbolic link
-      return new INodeSymlink(symlink, modificationTime, atime, permissions);
+      return new INodeSymlink(id, symlink, modificationTime, atime, permissions);
     } else if (blocks == null) { //not sym link and blocks null? directory!
       if (nsQuota >= 0 || dsQuota >= 0) {
         return new INodeDirectoryWithQuota(
-            permissions, modificationTime, nsQuota, dsQuota);
+            id, permissions, modificationTime, nsQuota, dsQuota);
       } 
       // regular directory
-      return new INodeDirectory(permissions, modificationTime);
+      return new INodeDirectory(id, permissions, modificationTime);
     }
     // file
-    return new INodeFile(permissions, blocks, replication,
+    return new INodeFile(id, permissions, blocks, replication,
         modificationTime, atime, preferredBlockSize);
   }
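The factory keeps its old dispatch rules — a non-empty symlink string means symlink, null blocks means directory (quota'd if either quota is set), otherwise file — and simply threads the preassigned id through. The same decision tree, restated as a standalone sketch (types are illustrative stand-ins):

    // Dispatch order mirrored from newINode(): symlink, then directory
    // (with or without quota), then regular file; the id just passes through.
    class NewINodeDispatchSketch {
      static String kindOf(String symlink, boolean hasBlocks,
                           long nsQuota, long dsQuota) {
        if (!symlink.isEmpty()) {
          return "symlink";
        } else if (!hasBlocks) {
          return (nsQuota >= 0 || dsQuota >= 0) ? "directory+quota" : "directory";
        }
        return "file";
      }

      public static void main(String[] args) {
        System.out.println(kindOf("", false, -1, -1)); // directory
        System.out.println(kindOf("", false, 10, -1)); // directory+quota
        System.out.println(kindOf("", true, -1, -1));  // file
      }
    }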
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 1b193edf427..a84b747246a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -53,17 +53,17 @@ class INodeDirectory extends INode {
 
   private List<INode> children = null;
 
-  INodeDirectory(String name, PermissionStatus permissions) {
-    super(name, permissions);
+  INodeDirectory(long id, String name, PermissionStatus permissions) {
+    super(id, name, permissions);
   }
 
-  public INodeDirectory(PermissionStatus permissions, long mTime) {
-    super(permissions, mTime, 0);
+  public INodeDirectory(long id, PermissionStatus permissions, long mTime) {
+    super(id, permissions, mTime, 0);
   }
-
+
   /** constructor */
-  INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
-    super(name, permissions, null, mtime, 0L);
+  INodeDirectory(long id, byte[] name, PermissionStatus permissions, long mtime) {
+    super(id, name, permissions, null, mtime, 0L);
   }
 
   /** copy constructor
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
index b05f7a5af6c..e942e304a21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
@@ -54,16 +54,16 @@ class INodeDirectoryWithQuota extends INodeDirectory {
   }
 
   /** constructor with no quota verification */
-  INodeDirectoryWithQuota(PermissionStatus permissions, long modificationTime,
-      long nsQuota, long dsQuota) {
-    super(permissions, modificationTime);
+  INodeDirectoryWithQuota(long id, PermissionStatus permissions,
+      long modificationTime, long nsQuota, long dsQuota) {
+    super(id, permissions, modificationTime);
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
   }
 
   /** constructor with no quota verification */
-  INodeDirectoryWithQuota(String name, PermissionStatus permissions) {
-    super(name, permissions);
+  INodeDirectoryWithQuota(long id, String name, PermissionStatus permissions) {
+    super(id, name, permissions);
   }
 
   /** Get this directory's namespace quota
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 5eb88166c89..b5235b84513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -86,15 +86,15 @@ class INodeFile extends INode implements BlockCollection {
 
   private BlockInfo[] blocks;
 
-  INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
-      short replication, long modificationTime,
-      long atime, long preferredBlockSize) {
-    super(permissions, modificationTime, atime);
+  INodeFile(long id, PermissionStatus permissions, BlockInfo[] blklist,
+      short replication, long modificationTime, long atime,
+      long preferredBlockSize) {
+    super(id, permissions, modificationTime, atime);
     header = HeaderFormat.combineReplication(header, replication);
     header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
     this.blocks = blklist;
   }
-
+
   /** @return true unconditionally. */
   @Override
   public final boolean isFile() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 15b4be509a8..65020e8d2ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -49,21 +49,23 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
 
-  INodeFileUnderConstruction(PermissionStatus permissions,
+  INodeFileUnderConstruction(long id,
+                             PermissionStatus permissions,
                              short replication,
                              long preferredBlockSize,
                              long modTime,
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
-        modTime, modTime, preferredBlockSize);
+    super(id, permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY,
+        replication, modTime, modTime, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
     this.clientNode = clientNode;
   }
 
-  INodeFileUnderConstruction(byte[] name,
+  INodeFileUnderConstruction(long id,
+                             byte[] name,
                              short blockReplication,
                              long modificationTime,
                              long preferredBlockSize,
@@ -72,8 +74,8 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(perm, blocks, blockReplication, modificationTime, modificationTime,
-        preferredBlockSize);
+    super(id, perm, blocks, blockReplication, modificationTime,
+        modificationTime, preferredBlockSize);
     setLocalName(name);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
@@ -112,7 +114,8 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     assert allBlocksComplete() : "Can't finalize inode " + this
       + " since it contains non-complete blocks! Blocks are "
      + Arrays.asList(getBlocks());
-    INodeFile obj = new INodeFile(getPermissionStatus(),
+    INodeFile obj = new INodeFile(getId(),
+                                  getPermissionStatus(),
                                   getBlocks(),
                                   getBlockReplication(),
                                   getModificationTime(),
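Note that both INode's copy constructor and convertToInodeFile() above carry getId() across, so finalizing a file under construction — or renaming an inode — never changes its identity. A small sketch of that invariant (illustrative types, not the HDFS classes):

    // Sketch: converting an under-construction file to a finalized one
    // copies the id, so inode identity is stable across the file's lifecycle.
    class FinalizeKeepsIdSketch {
      static final class FileNode {
        final long id;
        final boolean underConstruction;
        FileNode(long id, boolean uc) { this.id = id; this.underConstruction = uc; }
        FileNode finalizeFile() { return new FileNode(this.id, false); } // same id
      }

      public static void main(String[] args) {
        FileNode uc = new FileNode(1003L, true);
        FileNode done = uc.finalizeFile();
        System.out.println(uc.id == done.id);  // true: identity preserved
      }
    }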
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
new file mode 100644
index 00000000000..e6f682d0bc4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * An id which uniquely identifies an inode
+ */
+@InterfaceAudience.Private
+class INodeId implements Comparable<INodeId> {
+  /**
+   * The last reserved inode id. Ids 1 to 1000 are reserved for potential
+   * future use. Ids are not recycled and are not expected to wrap around
+   * for a very long time. The root inode id is therefore 1001.
+   */
+  public static final long LAST_RESERVED_ID = 1000L;
+
+  /**
+   * Inode id validation during lease checks is skipped when a request uses
+   * GRANDFATHER_INODE_ID, for backward compatibility.
+   */
+  public static final long GRANDFATHER_INODE_ID = 0;
+
+  private AtomicLong lastInodeId = new AtomicLong();
+
+  /**
+   * Create a new instance, initialized to LAST_RESERVED_ID.
+   */
+  INodeId() {
+    lastInodeId.set(INodeId.LAST_RESERVED_ID);
+  }
+
+  /**
+   * Set the last allocated inode id when fsimage is loaded or editlog is
+   * applied.
+   * @throws IOException
+   */
+  void resetLastInodeId(long newValue) throws IOException {
+    if (newValue < getLastInodeId()) {
+      throw new IOException(
+          "Can't reset lastInodeId to be less than its current value "
+              + getLastInodeId() + ", newValue=" + newValue);
+    }
+
+    lastInodeId.set(newValue);
+  }
+
+  void resetLastInodeIdWithoutChecking(long newValue) {
+    lastInodeId.set(newValue);
+  }
+
+  long getLastInodeId() {
+    return lastInodeId.get();
+  }
+
+  /**
+   * First increment the counter and then get the id.
+   */
+  long allocateNewInodeId() {
+    return lastInodeId.incrementAndGet();
+  }
+
+  @Override
+  // Comparable
+  public int compareTo(INodeId that) {
+    long id1 = this.getLastInodeId();
+    long id2 = that.getLastInodeId();
+    return id1 < id2 ? -1 : id1 > id2 ? 1 : 0;
+  }
+
+  @Override
+  // Object
+  public boolean equals(Object o) {
+    if (!(o instanceof INodeId)) {
+      return false;
+    }
+    return compareTo((INodeId) o) == 0;
+  }
+
+  @Override
+  // Object
+  public int hashCode() {
+    long id = getLastInodeId();
+    return (int) (id ^ (id >>> 32));
+  }
+}
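INodeId is essentially an AtomicLong with a monotonicity guard: allocation is increment-then-read, and the checked reset refuses to move the counter backwards — only tests and clear() may do that, via the unchecked variant. A condensed restatement of that contract, assuming the same constants:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    // Condensed version of INodeId's contract.
    class INodeIdContractSketch {
      static final long LAST_RESERVED_ID = 1000L;
      private final AtomicLong last = new AtomicLong(LAST_RESERVED_ID);

      long allocate() { return last.incrementAndGet(); }  // thread-safe

      void reset(long v) throws IOException {             // checked: monotonic
        if (v < last.get()) {
          throw new IOException("Can't reset lastInodeId below " + last.get());
        }
        last.set(v);
      }

      public static void main(String[] args) throws IOException {
        INodeIdContractSketch ids = new INodeIdContractSketch();
        System.out.println(ids.allocate()); // 1001
        ids.reset(5000L);                   // ok: moves forward
        // ids.reset(10L) would throw: the counter never goes backwards here
      }
    }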
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index dcde5a36b8a..4b33fc853a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 public class INodeSymlink extends INode {
   private final byte[] symlink; // The target URI
 
-  INodeSymlink(String value, long mtime, long atime,
-               PermissionStatus permissions) {
-    super(permissions, mtime, atime);
+  INodeSymlink(long id, String value, long mtime, long atime,
+               PermissionStatus permissions) {
+    super(id, permissions, mtime, atime);
     this.symlink = DFSUtil.string2Bytes(value);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index 7962d4a9e2d..8fdcdb150d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -62,7 +62,8 @@ public class CreateEditsLog {
 
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                       new FsPermission((short)0777));
-    INodeDirectory dirInode = new INodeDirectory(p, 0L);
+    INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        p, 0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -81,8 +82,9 @@ public class CreateEditsLog {
       }
 
       INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-          null, replication, 0, blockSize, blocks, p, "", "", null);
-      // Append path to filename with information about blockIDs 
+          INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
+          blocks, p, "", "", null);
+      // Append path to filename with information about blockIDs 
       String path = "_" + iF + "_B" + blocks[0].getBlockId() + 
                     "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
       String filePath = nameGenerator.getNextFileName("");
@@ -90,12 +92,12 @@ public class CreateEditsLog {
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(p, 0L);
+        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath, 
-          new INodeFileUnderConstruction(
-              p, replication, 0, blockSize, "", "", null));
+      editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
+          INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
+          null));
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index d2fdc6647f2..6dfbf1d2044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -208,7 +208,7 @@ public abstract class FSImageTestUtil {
    * only a specified number of "mkdirs" operations.
    */
   public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
-      long firstTxId) throws IOException {
+      long firstTxId, long newInodeId) throws IOException {
     FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
     editLog.setNextTxId(firstTxId);
     editLog.openForWrite();
@@ -217,7 +217,7 @@ public abstract class FSImageTestUtil {
         FsPermission.createImmutable((short)0755));
     for (int i = 1; i <= numDirs; i++) {
       String dirName = "dir" + i;
-      INodeDirectory dir = new INodeDirectory(dirName, perms);
+      INodeDirectory dir = new INodeDirectory(newInodeId + i - 1, dirName, perms);
       editLog.logMkDir("/" + dirName, dir);
     }
     editLog.logSync();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index aa9b984204d..80f449e1f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -153,7 +153,8 @@ public class TestEditLog {
 
       for (int i = 0; i < numTransactions; i++) {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
-            p, replication, blockSize, 0, "", "", null);
+            namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
+            "", null);
         editLog.logOpenFile("/filename" + (startIndex + i), inode);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
         editLog.logSync();
@@ -318,6 +319,11 @@ public class TestEditLog {
     // we should now be writing to edits_inprogress_3
     fsimage.rollEditLog();
 
+    // Remember the current lastInodeId and reset it back afterwards, to test
+    // loading edit log segments. The transactions below allocate new inode
+    // ids to write to the edit logs, but do not create inodes in the namespace.
+    long originalLastInodeId = namesystem.getLastInodeId();
+
     // Create threads and make them run transactions concurrently.
     Thread threadId[] = new Thread[NUM_THREADS];
     for (int i = 0; i < NUM_THREADS; i++) {
@@ -350,6 +356,7 @@ public class TestEditLog {
     // If there were any corruptions, it is likely that the reading in
     // of these transactions will throw an exception.
     //
+    namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
     for (Iterator<StorageDirectory> it = 
            fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
       FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index b275cebc19f..4c8d135eb5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -73,7 +73,8 @@ public class TestFsLimits {
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
 
-    rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms);
+    rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
+        .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
     inodes = new INode[]{ rootInode, null };
     fs = null;
     fsIsReady = true;
@@ -152,7 +153,8 @@ public class TestFsLimits {
     // have to create after the caller has had a chance to set conf values
     if (fs == null) fs = new MockFSDirectory();
 
-    INode child = new INodeDirectory(name, perms);
+    INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+        name, perms);
     child.setLocalName(name);
 
     Class<?> generated = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 695490e3391..42f90798daa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -25,10 +25,14 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Test;
 
@@ -49,9 +53,9 @@ public class TestINodeFile {
   public void testReplication () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
                  inf.getBlockReplication());
   }
@@ -66,9 +70,9 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    new INodeFile(new PermissionStatus(userName, null,
-                  FsPermission.getDefault()), null, replication,
-                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   /**
@@ -79,20 +83,20 @@ public class TestINodeFile {
   public void testPreferredBlockSize () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
-    assertEquals("True has to be returned in this case", preferredBlockSize,
-        inf.getPreferredBlockSize());
-  }
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
+    assertEquals("True has to be returned in this case", preferredBlockSize,
+        inf.getPreferredBlockSize());
+  }
 
   @Test
   public void testPreferredBlockSizeUpperBound () {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
-                                  FsPermission.getDefault()), null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+        replication, 0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
                  inf.getPreferredBlockSize());
   }
@@ -107,9 +111,9 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    new INodeFile(new PermissionStatus(userName, null, 
-                  FsPermission.getDefault()), null, replication,
-                  0L, 0L, preferredBlockSize);
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
   }
 
   /**
@@ -122,10 +126,10 @@ public class TestINodeFile {
       throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    new INodeFile(new PermissionStatus(userName, null, 
-                  FsPermission.getDefault()), null, replication,
-                  0L, 0L, preferredBlockSize);
-  }
+    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+        null, FsPermission.getDefault()), null, replication, 0L, 0L,
+        preferredBlockSize);
+  }
 
   @Test
   public void testGetFullPathName() {
@@ -134,12 +138,14 @@ public class TestINodeFile {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(perms, null, replication,
-                                  0L, 0L, preferredBlockSize);
+    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
+        replication, 0L, 0L, preferredBlockSize);
     inf.setLocalName("f");
 
-    INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
-    INodeDirectory dir = new INodeDirectory("d", perms);
+    INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        INodeDirectory.ROOT_NAME, perms);
+    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
+        perms);
 
     assertEquals("f", inf.getFullPathName());
     assertEquals("", inf.getLocalParentDir());
@@ -195,7 +201,7 @@ public class TestINodeFile {
     for (int i = 0; i < nCount; i++) {
       PermissionStatus perms = new PermissionStatus(userName, null,
          FsPermission.getDefault());
-      iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+      iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
          preferredBlockSize);
      iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
      BlockInfo newblock = new BlockInfo(replication);
@@ -246,10 +252,10 @@ public class TestINodeFile {
     }
 
     {//cast from INodeFile
-      final INode from = new INodeFile(
-          perm, null, replication, 0L, 0L, preferredBlockSize);
-      
-      //cast to INodeFile, should success
+      final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
+          null, replication, 0L, 0L, preferredBlockSize);
+      
+      //cast to INodeFile, should succeed
       final INodeFile f = INodeFile.valueOf(from, path);
       assertTrue(f == from);
@@ -271,8 +277,9 @@ public class TestINodeFile {
 
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFileUnderConstruction(
-          perm, replication, 0L, 0L, "client", "machine", null);
-      
+          INodeId.GRANDFATHER_INODE_ID, perm, replication, 0L, 0L, "client",
+          "machine", null);
+      
       //cast to INodeFile, should success
       final INodeFile f = INodeFile.valueOf(from, path);
       assertTrue(f == from);
@@ -291,7 +298,8 @@ public class TestINodeFile {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(perm, 0L);
+      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
+          0L);
 
       //cast to INodeFile, should fail
       try {
@@ -314,4 +322,47 @@ public class TestINodeFile {
       assertTrue(d == from);
     }
   }
+
+  /**
+   * Verify that the root always has inode id 1001, and that a newly
+   * formatted fsimage has last allocated inode id 1000. Validate that the
+   * correct lastInodeId is persisted.
+   * @throws IOException
+   */
+  @Test
+  public void TestInodeId() throws IOException {
+
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+
+    FSNamesystem fsn = cluster.getNamesystem();
+    long lastId = fsn.getLastInodeId();
+
+    assertTrue(lastId == 1001);
+
+    // Create one directory and the last inode id should increase to 1002
+    FileSystem fs = cluster.getFileSystem();
+    Path path = new Path("/test1");
+    assertTrue(fs.mkdirs(path));
+    assertTrue(fsn.getLastInodeId() == 1002);
+
+    Path filePath = new Path("/test1/file");
+    fs.create(filePath);
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    // Rename doesn't increase inode id
+    Path renamedPath = new Path("/test2");
+    fs.rename(path, renamedPath);
+    assertTrue(fsn.getLastInodeId() == 1003);
+
+    cluster.restartNameNode();
+    cluster.waitActive();
+    // Make sure empty editlog can be handled
+    cluster.restartNameNode();
+    cluster.waitActive();
+    assertTrue(fsn.getLastInodeId() == 1003);
+  }
 }
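The numbers the test asserts follow directly from the allocator: a freshly formatted namesystem sits at LAST_RESERVED_ID (1000), the root takes 1001, only operations that create inodes move the watermark, and a restart recovers the watermark from the replayed edit log. A compact trace of the same arithmetic (illustrative, mirroring the test above):

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative trace of the watermark the test above asserts.
    class InodeIdTraceSketch {
      public static void main(String[] args) {
        AtomicLong id = new AtomicLong(1000L);  // freshly formatted image
        long root = id.incrementAndGet();       // 1001: "/" created on format
        long dir  = id.incrementAndGet();       // 1002: mkdirs /test1
        long file = id.incrementAndGet();       // 1003: create /test1/file
        // rename /test1 -> /test2 allocates nothing; after a NameNode
        // restart, edit-log replay restores the watermark to 1003.
        System.out.println(root + " " + dir + " " + file);
      }
    }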
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
index 5e18381e7a5..f1444a49f57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -139,7 +140,9 @@ public class TestEditLogsDuringFailover {
       // Create a fake in-progress edit-log in the shared directory
       URI sharedUri = cluster.getSharedEditsDir(0, 1);
       File sharedDir = new File(sharedUri.getPath(), "current");
-      FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1);
+      FSNamesystem fsn = cluster.getNamesystem(0);
+      FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1,
+          fsn.getLastInodeId() + 1);
 
       assertEditFiles(Collections.singletonList(sharedUri),
           NNStorage.getInProgressEditsFileName(1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 04750685e0d..2f93cca5dd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -212,8 +212,9 @@ public class TestStandbyCheckpoints {
     File sharedDir = new File(sharedUri.getPath(), "current");
     File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
         "testCheckpointCancellation-tmp");
-    FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG,
-        3);
+    FSNamesystem fsn = cluster.getNamesystem(0);
+    FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
+        fsn.getLastInodeId() + 1);
     String fname = NNStorage.getInProgressEditsFileName(3); 
     new File(tmpDir, fname).renameTo(new File(sharedDir, fname));