diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 02410f21bc4..98c09819994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -76,6 +76,9 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-4099. Clean up replication code and add more javadoc. (szetszwo)
 
+    HDFS-4107. Add utility methods for casting INode to INodeFile and
+    INodeFileUnderConstruction. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 50b34076b23..5d2cb27c68b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -966,7 +966,7 @@ public class FSDirectory implements Closeable {
     int i = 0;
     int totalBlocks = 0;
     for(String src : srcs) {
-      INodeFile srcInode = getFileINode(src);
+      INodeFile srcInode = (INodeFile)getINode(src);
       allSrcInodes[i++] = srcInode;
       totalBlocks += srcInode.blocks.length;
     }
@@ -1230,25 +1230,13 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  /**
-   * Get {@link INode} associated with the file.
-   */
-  INodeFile getFileINode(String src) throws UnresolvedLinkException {
-    INode inode = getINode(src);
-    if (inode == null || inode.isDirectory())
-      return null;
-    assert !inode.isLink();
-    return (INodeFile) inode;
-  }
-
   /**
    * Get {@link INode} associated with the file / directory.
    */
   INode getINode(String src) throws UnresolvedLinkException {
     readLock();
     try {
-      INode iNode = rootDir.getNode(src, true);
-      return iNode;
+      return rootDir.getNode(src, true);
     } finally {
       readUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index d98b2472288..9dd5f7fc550 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -479,8 +479,8 @@ public class FSEditLogLoader {
       Lease lease = fsNamesys.leaseManager.getLease(
           reassignLeaseOp.leaseHolder);
       INodeFileUnderConstruction pendingFile =
-          (INodeFileUnderConstruction) fsDir.getFileINode(
-              reassignLeaseOp.path);
+          INodeFileUnderConstruction.valueOf(
+              fsDir.getINode(reassignLeaseOp.path), reassignLeaseOp.path);
       fsNamesys.reassignLeaseInternal(lease,
           reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile);
       break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index e1882d94814..0ec2ae00cc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -365,14 +365,7 @@ class FSImageFormat {
 
       // verify that file exists in namespace
       String path = cons.getLocalName();
-      INode old = fsDir.getFileINode(path);
-      if (old == null) {
-        throw new IOException("Found lease for non-existent file " + path);
-      }
-      if (old.isDirectory()) {
-        throw new IOException("Found lease for directory " + path);
-      }
-      INodeFile oldnode = (INodeFile) old;
+      INodeFile oldnode = INodeFile.valueOf(fsDir.getINode(path), path);
       fsDir.replaceNode(path, oldnode, cons);
       namesystem.leaseManager.addLease(cons.getClientName(), path);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 65cab18d9d4..8eb25327798 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1251,11 +1251,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
 
     long now = now();
-    INodeFile inode = dir.getFileINode(src);
-    if (inode == null) {
-      throw new FileNotFoundException("File does not exist: " + src);
-    }
-    assert !inode.isLink();
+    final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
     if (doAccessTime && isAccessTimeSupported()) {
       if (now <= inode.getAccessTime() + getAccessTimePrecision()) {
         // if we have to set access time but we only have the readlock, then
@@ -1371,28 +1367,27 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     // we put the following prerequisite for the operation
     // replication and blocks sizes should be the same for ALL the blocks
+
     // check the target
-    INode inode = dir.getFileINode(target);
-
-    if(inode == null) {
-      throw new IllegalArgumentException("concat: trg file doesn't exist");
+    final INodeFile trgInode = INodeFile.valueOf(dir.getINode(target), target);
+    if(trgInode.isUnderConstruction()) {
+      throw new HadoopIllegalArgumentException("concat: target file "
+          + target + " is under construction");
     }
-    if(inode.isUnderConstruction()) {
-      throw new IllegalArgumentException("concat: trg file is uner construction");
-    }
-
-    INodeFile trgInode = (INodeFile) inode;
-
-    // per design trg shouldn't be empty and all the blocks same size
+    // per design target shouldn't be empty and all the blocks same size
     if(trgInode.blocks.length == 0) {
-      throw new IllegalArgumentException("concat: "+ target + " file is empty");
+      throw new HadoopIllegalArgumentException("concat: target file "
+          + target + " is empty");
     }
 
     long blockSize = trgInode.getPreferredBlockSize();
 
     // check the end block to be full
     if(blockSize != trgInode.blocks[trgInode.blocks.length-1].getNumBytes()) {
-      throw new IllegalArgumentException(target + " blocks size should be the same");
+      throw new HadoopIllegalArgumentException("The last block in " + target
+          + " is not full; last block size = "
+          + trgInode.blocks[trgInode.blocks.length-1].getNumBytes()
+          + " but file block size = " + blockSize);
     }
 
     si.add(trgInode);
@@ -1405,21 +1400,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if(i==srcs.length-1)
         endSrc=true;
 
-      INodeFile srcInode = dir.getFileINode(src);
-
+      final INodeFile srcInode = INodeFile.valueOf(dir.getINode(src), src);
       if(src.isEmpty()
-          || srcInode == null
           || srcInode.isUnderConstruction()
           || srcInode.blocks.length == 0) {
-        throw new IllegalArgumentException("concat: file " + src +
-            " is invalid or empty or underConstruction");
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " is invalid or empty or underConstruction");
       }
 
       // check replication and blocks size
       if(repl != srcInode.getBlockReplication())
      {
-        throw new IllegalArgumentException(src + " and " + target + " " +
-            "should have same replication: "
-            + repl + " vs. " + srcInode.getBlockReplication());
+        throw new HadoopIllegalArgumentException("concat: the source file "
+            + src + " and the target file " + target
+            + " should have the same replication: source replication is "
+            + srcInode.getBlockReplication()
+            + " but target replication is " + repl);
       }
 
       //boolean endBlock=false;
@@ -1429,8 +1424,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if(endSrc)
         idx = srcInode.blocks.length-2; // end block of endSrc is OK not to be full
       if(idx >= 0 && srcInode.blocks[idx].getNumBytes() != blockSize) {
-        throw new IllegalArgumentException("concat: blocks sizes of " +
-            src + " and " + target + " should all be the same");
+        throw new HadoopIllegalArgumentException("concat: the source file "
+            + src + " and the target file " + target
+            + " should have the same block sizes: target block size is "
+            + blockSize + " but the size of source block " + idx + " is "
+            + srcInode.blocks[idx].getNumBytes());
       }
 
       si.add(srcInode);
@@ -1439,7 +1437,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     // make sure no two files are the same
     if(si.size() < srcs.length+1) { // trg + srcs
       // it means at least two files are the same
-      throw new IllegalArgumentException("at least two files are the same");
+      throw new HadoopIllegalArgumentException(
+          "concat: at least two of the source files are the same");
     }
 
     if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1778,13 +1777,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
 
     try {
-      INodeFile myFile = dir.getFileINode(src);
-      try {
-        blockManager.verifyReplication(src, replication, clientMachine);
-      } catch(IOException e) {
-        throw new IOException("failed to create "+e.getMessage());
-      }
+      blockManager.verifyReplication(src, replication, clientMachine);
       boolean create = flag.contains(CreateFlag.CREATE);
+      final INode myFile = dir.getINode(src);
       if (myFile == null) {
         if (!create) {
           throw new FileNotFoundException("failed to overwrite or append to non-existent file "
@@ -1810,8 +1805,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
 
     if (append && myFile != null) {
+      final INodeFile f = INodeFile.valueOf(myFile, src);
       return prepareFileForWrite(
-          src, myFile, holder, clientMachine, clientNode, true);
+          src, f, holder, clientMachine, clientNode, true);
     } else {
       // Now we can add the name to the filesystem.  This file has no
       // blocks associated with it.
@@ -1905,11 +1901,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throw new IOException("Invalid file name: " + src);
     }
 
-    INode inode = dir.getFileINode(src);
-    if (inode == null) {
-      throw new FileNotFoundException("File not found " + src);
-    }
-
+    final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
     if (!inode.isUnderConstruction()) {
       return true;
     }
@@ -2310,35 +2302,32 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private INodeFileUnderConstruction checkLease(String src, String holder)
       throws LeaseExpiredException, UnresolvedLinkException {
     assert hasReadOrWriteLock();
-    INodeFile file = dir.getFileINode(src);
-    checkLease(src, holder, file);
-    return (INodeFileUnderConstruction)file;
+    return checkLease(src, holder, dir.getINode(src));
   }
 
-  private void checkLease(String src, String holder, INode file)
-      throws LeaseExpiredException {
+  private INodeFileUnderConstruction checkLease(String src, String holder,
+      INode file) throws LeaseExpiredException {
     assert hasReadOrWriteLock();
-    if (file == null || file.isDirectory()) {
+    if (file == null || !(file instanceof INodeFile)) {
       Lease lease = leaseManager.getLease(holder);
-      throw new LeaseExpiredException("No lease on " + src +
-          " File does not exist. " +
-          (lease != null ? lease.toString() :
-            "Holder " + holder +
-            " does not have any open files."));
+      throw new LeaseExpiredException(
+          "No lease on " + src + ": File does not exist. "
+          + (lease != null ? lease.toString()
+              : "Holder " + holder + " does not have any open files."));
     }
     if (!file.isUnderConstruction()) {
       Lease lease = leaseManager.getLease(holder);
-      throw new LeaseExpiredException("No lease on " + src +
-          " File is not open for writing. " +
-          (lease != null ? lease.toString() :
-            "Holder " + holder +
-            " does not have any open files."));
+      throw new LeaseExpiredException(
+          "No lease on " + src + ": File is not open for writing. "
+          + (lease != null ? lease.toString()
+              : "Holder " + holder + " does not have any open files."));
     }
     INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)file;
     if (holder != null && !pendingFile.getClientName().equals(holder)) {
       throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
           + pendingFile.getClientName() + " but is accessed by " + holder);
     }
+    return pendingFile;
   }
 
   /**
@@ -2380,15 +2369,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       pendingFile = checkLease(src, holder);
     } catch (LeaseExpiredException lee) {
-      INodeFile file = dir.getFileINode(src);
-      if (file != null && !file.isUnderConstruction()) {
+      final INode inode = dir.getINode(src);
+      if (inode != null && inode instanceof INodeFile && !inode.isUnderConstruction()) {
        // This could be a retry RPC - i.e the client tried to close
        // the file, but missed the RPC response. Thus, it is trying
        // again to close the file. If the file still exists and
        // the client's view of the last block matches the actual
        // last block, then we'll treat it as a successful close.
        // See HDFS-3031.
-        Block realLastBlock = file.getLastBlock();
+        final Block realLastBlock = ((INodeFile)inode).getLastBlock();
         if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
           NameNode.stateChangeLog.info("DIR* NameSystem.completeFile: " +
               "received request from " + holder + " to complete file " + src +
@@ -2974,23 +2963,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     LOG.info("Recovering lease=" + lease + ", src=" + src);
     assert !isInSafeMode();
     assert hasWriteLock();
-    INodeFile iFile = dir.getFileINode(src);
-    if (iFile == null) {
-      final String message = "DIR* NameSystem.internalReleaseLease: "
-        + "attempt to release a create lock on "
-        + src + " file does not exist.";
-      NameNode.stateChangeLog.warn(message);
-      throw new IOException(message);
-    }
-    if (!iFile.isUnderConstruction()) {
-      final String message = "DIR* NameSystem.internalReleaseLease: "
-        + "attempt to release a create lock on "
-        + src + " but file is already closed.";
-      NameNode.stateChangeLog.warn(message);
-      throw new IOException(message);
-    }
-    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile;
+    final INodeFileUnderConstruction pendingFile
+        = INodeFileUnderConstruction.valueOf(dir.getINode(src), src);
 
     int nrBlocks = pendingFile.numBlocks();
     BlockInfo[] blocks = pendingFile.getBlocks();
@@ -4298,17 +4273,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       for (Lease lease : leaseManager.getSortedLeases()) {
         for (String path : lease.getPaths()) {
-          INode node;
+          final INodeFileUnderConstruction cons;
          try {
-            node = dir.getFileINode(path);
+            cons = INodeFileUnderConstruction.valueOf(dir.getINode(path), path);
          } catch (UnresolvedLinkException e) {
            throw new AssertionError("Lease files should reside on this FS");
+          } catch (IOException e) {
+            throw new RuntimeException(e);
          }
-          assert node != null : "Found a lease for nonexisting file.";
-          assert node.isUnderConstruction() :
-            "Found a lease for file " + path + " that is not under construction." +
+ - " lease=" + lease; - INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node; BlockInfo[] blocks = cons.getBlocks(); if(blocks == null) continue; @@ -4891,21 +4863,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats, for (Lease lease : leaseManager.getSortedLeases()) { for(String path : lease.getPaths()) { // verify that path exists in namespace - INode node; + final INodeFileUnderConstruction cons; try { - node = dir.getFileINode(path); + cons = INodeFileUnderConstruction.valueOf(dir.getINode(path), path); } catch (UnresolvedLinkException e) { throw new AssertionError("Lease files should reside on this FS"); } - if (node == null) { - throw new IOException("saveLeases found path " + path + - " but no matching entry in namespace."); - } - if (!node.isUnderConstruction()) { - throw new IOException("saveLeases found path " + path + - " but is not under construction."); - } - INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node; FSImageSerialization.writeINodeUnderConstruction(out, cons, path); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 957f851c061..1116dd64f2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.List; @@ -32,6 +33,17 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; /** I-node for closed file. */ @InterfaceAudience.Private class INodeFile extends INode implements BlockCollection { + /** Cast INode to INodeFile. 
+  public static INodeFile valueOf(INode inode, String path) throws IOException {
+    if (inode == null) {
+      throw new FileNotFoundException("File does not exist: " + path);
+    }
+    if (!(inode instanceof INodeFile)) {
+      throw new FileNotFoundException("Path is not a file: " + path);
+    }
+    return (INodeFile)inode;
+  }
+
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index aff956e3cf8..12c36713287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 import com.google.common.base.Joiner;
 
@@ -35,6 +35,16 @@ import com.google.common.base.Joiner;
  */
 @InterfaceAudience.Private
 class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
+  /** Cast INode to INodeFileUnderConstruction. */
+  public static INodeFileUnderConstruction valueOf(INode inode, String path
+      ) throws IOException {
+    final INodeFile file = INodeFile.valueOf(inode, path);
+    if (!file.isUnderConstruction()) {
+      throw new IOException("File is not under construction: " + path);
+    }
+    return (INodeFileUnderConstruction)file;
+  }
+
   private String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
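Note on the two helpers above: they centralize the null, not-a-file, and not-under-construction checks that each call site previously repeated inline. As a rough before/after sketch paraphrased from the FSNamesystem.internalReleaseLease hunk earlier in this patch (identifiers as in the patch; the original logging and message text is abbreviated here):

    // before: every call site re-implemented the checks
    INodeFile iFile = dir.getFileINode(src);
    if (iFile == null) {
      throw new IOException("...file does not exist...");
    }
    if (!iFile.isUnderConstruction()) {
      throw new IOException("...file is already closed...");
    }
    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile;

    // after: one helper performs the checks and throws descriptive exceptions
    final INodeFileUnderConstruction pendingFile
        = INodeFileUnderConstruction.valueOf(dir.getINode(src), src);

The same helpers are used in FSEditLogLoader, FSImageFormat and the tests below.
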
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 88b7a2a13f3..b74e61f85b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -253,7 +253,7 @@ public class LeaseManager {
   private String findPath(INodeFileUnderConstruction pendingFile) {
     try {
       for (String src : paths) {
-        if (fsnamesystem.dir.getFileINode(src) == pendingFile) {
+        if (fsnamesystem.dir.getINode(src) == pendingFile) {
           return src;
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index a970c5dcce9..fe7ed1ad79e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -116,8 +116,6 @@ public class TestDistributedFileSystem {
       DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
       DFSTestUtil.readFile(fileSys, p);
 
-      DFSClient client = ((DistributedFileSystem)fileSys).dfs;
-
       fileSys.close();
 
     } finally {
@@ -449,7 +447,7 @@ public class TestDistributedFileSystem {
         fail("Expecting FileNotFoundException");
       } catch (FileNotFoundException e) {
         assertTrue("Not throwing the intended exception message", e.getMessage()
-            .contains("File does not exist: /test/TestExistingDir"));
+            .contains("Path is not a file: /test/TestExistingDir"));
       }
 
       //hftp
@@ -685,7 +683,6 @@ public class TestDistributedFileSystem {
   @Test
   public void testCreateWithCustomChecksum() throws Exception {
     Configuration conf = getTestConfiguration();
-    final long grace = 1000L;
     MiniDFSCluster cluster = null;
     Path testBasePath = new Path("/test/csum");
     // create args
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 66e60b02718..27490a2e8d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -83,8 +83,7 @@ public class TestBlockUnderConstruction {
   private void verifyFileBlocks(String file, boolean isFileOpen)
       throws IOException {
     FSNamesystem ns = cluster.getNamesystem();
-    INodeFile inode = ns.dir.getFileINode(file);
-    assertTrue("File does not exist: " + inode.toString(), inode != null);
+    final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
     assertTrue("File " + inode.toString() +
         " isUnderConstruction = " + inode.isUnderConstruction() +
         " expected to be " + isFileOpen,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index bb802185ac3..f5a4d4f5f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -18,13 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-
 import org.junit.Test;
 
 public class TestINodeFile {
@@ -199,4 +203,88 @@ public class TestINodeFile {
     return iNodes;
   }
+
+  /**
+   * Test for the static {@link INodeFile#valueOf(INode, String)}
+   * and {@link INodeFileUnderConstruction#valueOf(INode, String)} methods.
+   * @throws IOException
+   */
+  @Test
+  public void testValueOf() throws IOException {
+    final String path = "/testValueOf";
+    final PermissionStatus perm = new PermissionStatus(
+        userName, null, FsPermission.getDefault());
+    final short replication = 3;
+
+    {//cast from null
+      final INode from = null;
+
+      //cast to INodeFile, should fail
+      try {
+        INodeFile.valueOf(from, path);
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        assertTrue(fnfe.getMessage().contains("File does not exist"));
+      }
+
+      //cast to INodeFileUnderConstruction, should fail
+      try {
+        INodeFileUnderConstruction.valueOf(from, path);
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        assertTrue(fnfe.getMessage().contains("File does not exist"));
+      }
+    }
+
+    {//cast from INodeFile
+      final INode from = new INodeFile(
+          perm, null, replication, 0L, 0L, preferredBlockSize);
+
+      //cast to INodeFile, should succeed
+      final INodeFile f = INodeFile.valueOf(from, path);
+      assertTrue(f == from);
+
+      //cast to INodeFileUnderConstruction, should fail
+      try {
+        INodeFileUnderConstruction.valueOf(from, path);
+        fail();
+      } catch(IOException ioe) {
+        assertTrue(ioe.getMessage().contains("File is not under construction"));
+      }
+    }
+
+    {//cast from INodeFileUnderConstruction
+      final INode from = new INodeFileUnderConstruction(
+          perm, replication, 0L, 0L, "client", "machine", null);
+
+      //cast to INodeFile, should succeed
+      final INodeFile f = INodeFile.valueOf(from, path);
+      assertTrue(f == from);
+
+      //cast to INodeFileUnderConstruction, should succeed
+      final INodeFileUnderConstruction u = INodeFileUnderConstruction.valueOf(
+          from, path);
+      assertTrue(u == from);
+    }
+
+    {//cast from INodeDirectory
+      final INode from = new INodeDirectory(perm, 0L);
+
+      //cast to INodeFile, should fail
+      try {
+        INodeFile.valueOf(from, path);
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        assertTrue(fnfe.getMessage().contains("Path is not a file"));
+      }
+
+      //cast to INodeFileUnderConstruction, should fail
+      try {
+        INodeFileUnderConstruction.valueOf(from, path);
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        assertTrue(fnfe.getMessage().contains("Path is not a file"));
+      }
+    }
+  }
 }
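For readers who want to try the cast-helper pattern outside of HDFS, below is a minimal, self-contained sketch. The class names deliberately echo the INode hierarchy but are simplified stand-ins for illustration only (Node, FileNode, FileUnderConstructionNode, DirNode and ValueOfPatternDemo are assumed names, not part of this patch or of Hadoop); it mirrors the four cases exercised by testValueOf above.

import java.io.FileNotFoundException;
import java.io.IOException;

abstract class Node {                       // stands in for INode
  boolean isUnderConstruction() { return false; }
}

class DirNode extends Node {}               // stands in for INodeDirectory

class FileNode extends Node {               // stands in for INodeFile
  /** Cast a Node to FileNode, failing with a path-specific message. */
  static FileNode valueOf(Node node, String path) throws IOException {
    if (node == null) {
      throw new FileNotFoundException("File does not exist: " + path);
    }
    if (!(node instanceof FileNode)) {
      throw new FileNotFoundException("Path is not a file: " + path);
    }
    return (FileNode) node;
  }
}

class FileUnderConstructionNode extends FileNode { // stands in for INodeFileUnderConstruction
  @Override
  boolean isUnderConstruction() { return true; }

  /** Cast a Node to FileUnderConstructionNode, reusing the FileNode check. */
  static FileUnderConstructionNode valueOf(Node node, String path) throws IOException {
    final FileNode file = FileNode.valueOf(node, path);
    if (!file.isUnderConstruction()) {
      throw new IOException("File is not under construction: " + path);
    }
    return (FileUnderConstructionNode) file;
  }
}

public class ValueOfPatternDemo {
  public static void main(String[] args) {
    check(null, "/missing");                          // File does not exist
    check(new DirNode(), "/dir");                     // Path is not a file
    check(new FileNode(), "/closed");                 // FileNode ok, UC cast fails
    check(new FileUnderConstructionNode(), "/open");  // both casts succeed
  }

  private static void check(Node node, String path) {
    try {
      FileNode.valueOf(node, path);
      FileUnderConstructionNode.valueOf(node, path);
      System.out.println(path + ": both casts succeeded");
    } catch (IOException e) {
      System.out.println(path + ": " + e.getClass().getSimpleName() + ": " + e.getMessage());
    }
  }
}

As in the patch, the subclass helper simply delegates to the superclass check and then adds its own condition, so every caller gets the same FileNotFoundException/IOException messages regardless of which layer rejected the inode.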