From a973b2f55e4ad6f55eb1f48da4fe416c2bb37719 Mon Sep 17 00:00:00 2001
From: arp
Date: Thu, 18 Sep 2014 19:20:25 -0700
Subject: [PATCH] HDFS-7091. Add forwarding constructor for INodeFile for
 existing callers. (Arpit Agarwal)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
---
 .../apache/hadoop/hdfs/server/namenode/FSImageFormat.java | 2 +-
 .../hadoop/hdfs/server/namenode/FSImageSerialization.java | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java | 7 +++++++
 .../hadoop/hdfs/server/namenode/CreateEditsLog.java       | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/TestEditLog.java   | 2 +-
 .../hdfs/server/namenode/TestFSPermissionChecker.java     | 2 +-
 .../apache/hadoop/hdfs/server/namenode/TestINodeFile.java | 8 ++++----
 7 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 6d050e9d8ef..fd0f467383a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -789,7 +789,7 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       // Images in the old format will not have the lazyPersist flag so it is
       // safe to pass false always.
       final INodeFile file = new INodeFile(inodeId, localName, permissions,
-          modificationTime, atime, blocks, replication, blockSize, (byte)0, false);
+          modificationTime, atime, blocks, replication, blockSize, (byte)0);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 1872898d741..e2a9a933841 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -151,7 +151,7 @@ static INodeFile readINodeUnderConstruction(
     // Images in the pre-protobuf format will not have the lazyPersist flag,
     // so it is safe to pass false always.
     INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
-        modificationTime, blocks, blockReplication, preferredBlockSize, (byte)0, false);
+        modificationTime, blocks, blockReplication, preferredBlockSize, (byte)0);
     file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 05e7bd2ec04..aeddcad29ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -119,6 +119,13 @@ static boolean getLazyPersistFlag(long header) {
 
   private BlockInfo[] blocks;
 
+  INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
+            long atime, BlockInfo[] blklist, short replication,
+            long preferredBlockSize, byte storagePolicyID) {
+    this(id, name, permissions, mtime, atime, blklist, replication,
+         preferredBlockSize, storagePolicyID, false);
+  }
+
   INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
             long atime, BlockInfo[] blklist, short replication,
             long preferredBlockSize, byte storagePolicyID, boolean isLazyPersist) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index bcce0fd504e..94b139b261b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -82,7 +82,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
       }
 
       final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
-          p, 0L, 0L, blocks, replication, blockSize, (byte)0, false);
+          p, 0L, 0L, blocks, replication, blockSize, (byte)0);
       inode.toUnderConstruction("", "");
 
       // Append path to filename with information about blockIDs
@@ -97,7 +97,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
         editLog.logMkDir(currentDir, dirInode);
       }
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
-          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0, false);
+          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
       fileUc.toUnderConstruction("", "");
       editLog.logOpenFile(filePath, fileUc, false, false);
       editLog.logCloseFile(filePath, inode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 2cc9be4176f..89ef2151ea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -194,7 +194,7 @@ public void run() {
 
       for (int i = 0; i < numTransactions; i++) {
         INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
-            p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0, false);
+            p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
         inode.toUnderConstruction("", "");
 
         editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index 079218aadcc..9bee4a9bffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -432,7 +432,7 @@ private static INodeFile createINodeFile(INodeDirectory parent, String name,
       FsPermission.createImmutable(perm));
     INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
       name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
-      PREFERRED_BLOCK_SIZE, (byte)0, false);
+      PREFERRED_BLOCK_SIZE, (byte)0);
     parent.addChild(inodeFile);
     return inodeFile;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index bfaf9d8e280..d688724fd25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -85,7 +85,7 @@ public class TestINodeFile {
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, replication, preferredBlockSize, (byte)0, false);
+        null, replication, preferredBlockSize, (byte)0);
   }
 
   private static INodeFile createINodeFile(byte storagePolicyID) {
@@ -286,7 +286,7 @@ private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
     INodeFile[] iNodes = new INodeFile[nCount];
     for (int i = 0; i < nCount; i++) {
       iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
-          preferredBlockSize, (byte)0, false);
+          preferredBlockSize, (byte)0);
       iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
       BlockInfo newblock = new BlockInfo(replication);
       iNodes[i].addBlock(newblock);
@@ -344,7 +344,7 @@ public void testValueOf () throws IOException {
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFile(
           INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
-          1024L, (byte)0, false);
+          1024L, (byte)0);
       from.asFile().toUnderConstruction("client", "machine");
 
       //cast to INodeFile, should success
@@ -1068,7 +1068,7 @@ public void testFilesInGetListingOps() throws Exception {
   public void testFileUnderConstruction() {
     replication = 3;
     final INodeFile file = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null,
-        perm, 0L, 0L, null, replication, 1024L, (byte)0, false);
+        perm, 0L, 0L, null, replication, 1024L, (byte)0);
     assertFalse(file.isUnderConstruction());
 
     final String clientName = "client";
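
Usage sketch (illustrative, assuming the constructors shown in INodeFile.java above; variable names are placeholders): the forwarding constructor lets callers that do not track lazy-persist state drop the trailing boolean, while the explicit form stays available.

    // Forwarding constructor: isLazyPersist defaults to false.
    INodeFile file = new INodeFile(inodeId, localName, permissions,
        mtime, atime, blocks, replication, blockSize, (byte)0);

    // Equivalent call through the full constructor, spelling the flag out.
    INodeFile same = new INodeFile(inodeId, localName, permissions,
        mtime, atime, blocks, replication, blockSize, (byte)0, false);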