diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 71b87c50aa7..e752a07fb4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -435,6 +435,8 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4645. Move from randomly generated block ID to sequentially
     generated block ID. (Arpit Agarwal via szetszwo)
 
+    HDFS-4912. Cleanup FSNamesystem#startFileInternal. (suresh)
+
   OPTIMIZATIONS
 
     HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. (atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 134c12ee61e..08264e94d29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1892,6 +1892,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException(src);
     }
+    blockManager.verifyReplication(src, replication, clientMachine);
 
     boolean skipSync = false;
     final HdfsFileStatus stat;
@@ -1903,6 +1904,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + "): " + blockSize + " < " + minBlockSize);
     }
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    boolean create = flag.contains(CreateFlag.CREATE);
+    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -1910,8 +1913,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new SafeModeException("Cannot create file" + src, safeMode);
       }
       src = FSDirectory.resolvePath(src, pathComponents, dir);
-      startFileInternal(pc, src, permissions, holder, clientMachine, flag,
-          createParent, replication, blockSize);
+      startFileInternal(pc, src, permissions, holder, clientMachine,
+          create, overwrite, createParent, replication, blockSize);
       stat = dir.getFileInfo(src, false);
     } catch (StandbyException se) {
       skipSync = true;
@@ -1930,25 +1933,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   /**
-   * Create new or open an existing file for append.
+   * Create a new file or overwrite an existing file.
    *
-   * In case of opening the file for append, the method returns the last
-   * block of the file if this is a partial block, which can still be used
-   * for writing more data. The client uses the returned block locations
-   * to form the data pipeline for this block.
-   * The method returns null if the last block is full or if this is a
-   * new file. The client then allocates a new block with the next call
-   * using {@link NameNode#addBlock()}.
-   *
-   * For description of parameters and exceptions thrown see
+   * Once the file is created, the client then allocates a new block with the
+   * next call using {@link NameNode#addBlock()}.
+   *
+   * For description of parameters and exceptions thrown see
    * {@link ClientProtocol#create()}
-   *
-   * @return the last block locations if the block is partial or null otherwise
    */
-  private LocatedBlock startFileInternal(FSPermissionChecker pc, String src,
+  private void startFileInternal(FSPermissionChecker pc, String src,
       PermissionStatus permissions, String holder, String clientMachine,
-      EnumSet<CreateFlag> flag, boolean createParent, short replication,
-      long blockSize) throws SafeModeException, FileAlreadyExistsException,
+      boolean create, boolean overwrite, boolean createParent,
+      short replication, long blockSize) throws FileAlreadyExistsException,
       AccessControlException, UnresolvedLinkException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
     assert hasWriteLock();
@@ -1960,11 +1956,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + "; already exists as a directory.");
     }
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
-
-    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
-    boolean append = flag.contains(CreateFlag.APPEND);
     if (isPermissionEnabled) {
-      if (append || (overwrite && myFile != null)) {
+      if (overwrite && myFile != null) {
         checkPathAccess(pc, src, FsAction.WRITE);
       } else {
         checkAncestorAccess(pc, src, FsAction.WRITE);
       }
     }
@@ -1976,65 +1969,95 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
 
     try {
-      blockManager.verifyReplication(src, replication, clientMachine);
-      boolean create = flag.contains(CreateFlag.CREATE);
-
       if (myFile == null) {
         if (!create) {
-          throw new FileNotFoundException("failed to overwrite or append to non-existent file "
+          throw new FileNotFoundException("failed to overwrite non-existent file "
             + src + " on client " + clientMachine);
         }
       } else {
-        // File exists - must be one of append or overwrite
         if (overwrite) {
-          delete(src, true);
+          delete(src, true); // File exists - delete if overwrite
         } else {
-          // Opening an existing file for write - may need to recover lease.
+          // If lease soft limit time is expired, recover the lease
          recoverLeaseInternal(myFile, src, holder, clientMachine, false);
-
-          if (!append) {
-            throw new FileAlreadyExistsException("failed to create file " + src
-                + " on client " + clientMachine
-                + " because the file exists");
-          }
+          throw new FileAlreadyExistsException("failed to create file " + src
+              + " on client " + clientMachine + " because the file exists");
         }
       }
 
+      checkFsObjectLimit();
       final DatanodeDescriptor clientNode =
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
 
-      if (append && myFile != null) {
-        final INodeFile f = INodeFile.valueOf(myFile, src);
-        return prepareFileForWrite(src, f, holder, clientMachine, clientNode,
-            true, iip.getLatestSnapshot());
-      } else {
-        // Now we can add the name to the filesystem. This file has no
-        // blocks associated with it.
-        //
-        checkFsObjectLimit();
+      INodeFileUnderConstruction newNode = dir.addFile(src, permissions,
+          replication, blockSize, holder, clientMachine, clientNode);
+      if (newNode == null) {
+        throw new IOException("DIR* NameSystem.startFile: " +
+            "Unable to add file to namespace.");
+      }
+      leaseManager.addLease(newNode.getClientName(), src);
 
-        // increment global generation stamp
-        INodeFileUnderConstruction newNode = dir.addFile(src, permissions,
-            replication, blockSize, holder, clientMachine, clientNode);
-        if (newNode == null) {
-          throw new IOException("DIR* NameSystem.startFile: " +
-              "Unable to add file to namespace.");
-        }
-        leaseManager.addLease(newNode.getClientName(), src);
-
-        // record file record in log, record new generation stamp
-        getEditLog().logOpenFile(src, newNode);
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: "
-              +"add "+src+" to namespace for "+holder);
-        }
+      // record file record in log, record new generation stamp
+      getEditLog().logOpenFile(src, newNode);
+      if (NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: "
+            +"add "+src+" to namespace for "+holder);
       }
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: "
          +ie.getMessage());
       throw ie;
     }
-    return null;
+  }
+
+  /**
+   * Append to an existing file.
+   *
+   * The method returns the last block of the file if this is a partial block,
+   * which can still be used for writing more data. The client uses the returned
+   * block locations to form the data pipeline for this block.
+   * The method returns null if the last block is full. The client then
+   * allocates a new block with the next call using {@link NameNode#addBlock()}.
+   *
+   * For description of parameters and exceptions thrown see
+   * {@link ClientProtocol#append(String, String)}
+   *
+   * @return the last block locations if the block is partial or null otherwise
+   */
+  private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
+      String holder, String clientMachine) throws AccessControlException,
+      UnresolvedLinkException, FileNotFoundException, IOException {
+    assert hasWriteLock();
+    // Verify that the destination does not exist as a directory already.
+    final INodesInPath iip = dir.getINodesInPath4Write(src);
+    final INode inode = iip.getLastINode();
+    if (inode != null && inode.isDirectory()) {
+      throw new FileAlreadyExistsException("Cannot append to directory " + src
+          + "; already exists as a directory.");
+    }
+    if (isPermissionEnabled) {
+      checkPathAccess(pc, src, FsAction.WRITE);
+    }
+
+    try {
+      if (inode == null) {
+        throw new FileNotFoundException("failed to append to non-existent file "
+          + src + " on client " + clientMachine);
+      }
+      final INodeFile myFile = INodeFile.valueOf(inode, src, true);
+      // Opening an existing file for write - may need to recover lease.
+      recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+
+      final DatanodeDescriptor clientNode =
+          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
+      return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
+          true, iip.getLatestSnapshot());
+    } catch (IOException ie) {
+      NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
+      throw ie;
+    }
   }
 
   /**
@@ -2225,14 +2248,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           "Append is not enabled on this NameNode. Use the " +
           DFS_SUPPORT_APPEND_KEY + " configuration option to enable it.");
     }
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
-          + ", holder=" + holder
-          + ", clientMachine=" + clientMachine);
-    }
-    if (!DFSUtil.isValidName(src)) {
-      throw new InvalidPathException(src);
-    }
 
     LocatedBlock lb = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -2245,9 +2260,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new SafeModeException("Cannot append to file" + src, safeMode);
       }
       src = FSDirectory.resolvePath(src, pathComponents, dir);
-      lb = startFileInternal(pc, src, null, holder, clientMachine,
-          EnumSet.of(CreateFlag.APPEND),
-          false, blockManager.maxReplication, 0);
+      lb = appendFileInternal(pc, src, holder, clientMachine);
     } catch (StandbyException se) {
       skipSync = true;
       throw se;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index d2d2e9172d9..8abc3edb685 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -502,7 +503,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
       DFSClient client = dfs.dfs;
 
       // create a new file.
@@ -562,7 +563,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
       DFSClient client = dfs.dfs;
 
       // create a new file.
@@ -703,7 +704,7 @@ public class TestFileCreation {
     stm4.close();
 
     // verify that new block is associated with this file
-    DFSClient client = ((DistributedFileSystem)fs).dfs;
+    DFSClient client = fs.dfs;
     LocatedBlocks locations = client.getNamenode().getBlockLocations(
         file1.toString(), 0, Long.MAX_VALUE);
     System.out.println("locations = " + locations.locatedBlockCount());
@@ -951,7 +952,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
 
       // create a new file.
       final String f = DIR + "foo";
@@ -1012,7 +1013,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
 
       // create a new file.
       final String f = DIR + "foofs";
@@ -1044,7 +1045,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
 
       // create a new file.
       final String f = DIR + "testFsCloseAfterClusterShutdown";
@@ -1177,7 +1178,7 @@ public class TestFileCreation {
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
-      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      dfs = cluster.getFileSystem();
       DFSClient client = dfs.dfs;
 
       final Path f = new Path("/testFileIdMismatch.txt");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 93b87942ed6..8bb8aae1122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -130,7 +130,7 @@ public class TestLeaseRecovery2 {
     size = AppendTestUtil.nextInt(FILE_SIZE);
     filepath = createFile("/immediateRecoverLease-longlease", size, false);
 
-    // test recoverLese from a different client
+    // test recoverLease from a different client
     recoverLease(filepath, null);
     verifyFile(dfs, filepath, actual, size);
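
After the patch, startFileInternal handles only the create/overwrite path, with the CREATE and OVERWRITE flags resolved to plain booleans before the write lock is taken. The sketch below is not part of the patch; it is a minimal client-side illustration of the behaviour those branches produce. The class name, file path, and the assumption that fs.defaultFS points at a running HDFS cluster are made up for the example; the FileSystem API calls themselves are standard Hadoop client calls.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateOverwriteSketch {
  public static void main(String[] args) throws IOException {
    // Assumption: fs.defaultFS in the loaded configuration points at HDFS.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/hdfs-4912-demo.txt");    // hypothetical path

    // CREATE without OVERWRITE: if the path already exists, the NameNode's
    // startFileInternal rejects the call with FileAlreadyExistsException.
    try {
      FSDataOutputStream out = fs.create(file, false);
      out.writeBytes("first version\n");
      out.close();
    } catch (FileAlreadyExistsException e) {
      System.out.println("exists and OVERWRITE not set: " + e.getMessage());
    }

    // CREATE with OVERWRITE: startFileInternal deletes the existing file
    // (delete(src, true)) and then adds a fresh inode and lease for it.
    FSDataOutputStream out = fs.create(file, true);
    out.writeBytes("second version\n");
    out.close();

    fs.close();
  }
}

The same create() call also exercises the blockManager.verifyReplication() check that the patch moves ahead of the write lock, so an invalid replication factor is rejected before any namespace state is touched.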
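The append path now goes through the new appendFileInternal, which recovers an expired lease if needed and returns the last partial block so the client can keep writing into it, or null when the next addBlock() call should allocate a fresh block. Again a hedged, illustrative sketch rather than anything from the patch: the class name and path are invented, and it assumes the NameNode has append enabled (the dfs.support.append key referenced by DFS_SUPPORT_APPEND_KEY in the code above).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
  public static void main(String[] args) throws IOException {
    // Assumption: the NameNode is running with append support enabled;
    // otherwise appendFile() fails before appendFileInternal is reached.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/hdfs-4912-demo.txt");    // hypothetical path

    // append() reaches appendFileInternal on the NameNode: the lease is
    // recovered if its soft limit has expired, and the last block is handed
    // back when it is partial so this client can reuse it for the write
    // pipeline; a full last block means addBlock() allocates a new one.
    FSDataOutputStream out = fs.append(file);
    out.writeBytes("appended line\n");
    out.close();

    fs.close();
  }
}

If another client still holds a valid lease on the file, recoverLeaseInternal makes the append fail instead of silently stealing the lease, which is the lease-recovery behaviour the modified TestLeaseRecovery2 comment refers to.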