diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0a82d20ccfb..9a80a45a780 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -876,6 +876,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
     from the tree and deleting them from the inode map (kihwal via cmccabe)
 
+    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+    via cmccabe)
+
 Release 2.4.1 - 2014-06-23
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1d015b09bb5..a6aa05e0ebd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2764,9 +2764,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkOperation(OperationCategory.READ);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-      final INodeFile pendingFile = analyzeFileState(
+      FileState fileState = analyzeFileState(
           src, fileId, clientName, previous, onRetryBlock);
-      src = pendingFile.getFullPathName();
+      final INodeFile pendingFile = fileState.inode;
+      src = fileState.path;
 
       if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
         // This is a retry. Just return the last block if having locations.
@@ -2802,8 +2803,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // Run the full analysis again, since things could have changed
       // while chooseTarget() was executing.
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-      final INodeFile pendingFile =
+      FileState fileState =
           analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
+      final INodeFile pendingFile = fileState.inode;
+      src = fileState.path;
 
       if (onRetryBlock[0] != null) {
         if (onRetryBlock[0].getLocations().length > 0) {
@@ -2839,7 +2842,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return makeLocatedBlock(newBlock, targets, offset);
   }
 
-  INodeFile analyzeFileState(String src,
+  static class FileState {
+    public final INodeFile inode;
+    public final String path;
+
+    public FileState(INodeFile inode, String fullPath) {
+      this.inode = inode;
+      this.path = fullPath;
+    }
+  }
+
+  FileState analyzeFileState(String src,
                              long fileId,
                              String clientName,
                              ExtendedBlock previous,
@@ -2927,7 +2940,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
                 ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
                 offset);
-            return pendingFile;
+            return new FileState(pendingFile, src);
           } else {
             // Case 3
             throw new IOException("Cannot allocate block in " + src + ": " +
@@ -2940,7 +2953,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (!checkFileProgress(pendingFile, false)) {
       throw new NotReplicatedYetException("Not replicated yet: " + src);
     }
-    return pendingFile;
+    return new FileState(pendingFile, src);
   }
 
   LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index cf4b29f074b..d78e3a3f0a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -146,4 +146,62 @@ public class TestDeleteRace {
       }
     }
   }
+
+  private class RenameThread extends Thread {
+    private FileSystem fs;
+    private Path from;
+    private Path to;
+
+    RenameThread(FileSystem fs, Path from, Path to) {
+      this.fs = fs;
+      this.from = from;
+      this.to = to;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Thread.sleep(1000);
+        LOG.info("Renaming " + from + " to " + to);
+
+        fs.rename(from, to);
+        LOG.info("Renamed " + from + " to " + to);
+      } catch (Exception e) {
+        LOG.info(e);
+      }
+    }
+  }
+
+  @Test
+  public void testRenameRace() throws Exception {
+    try {
+      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+          SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fs = cluster.getFileSystem();
+      Path dirPath1 = new Path("/testRenameRace1");
+      Path dirPath2 = new Path("/testRenameRace2");
+      Path filePath = new Path("/testRenameRace1/file1");
+
+
+      fs.mkdirs(dirPath1);
+      FSDataOutputStream out = fs.create(filePath);
+      Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
+      renameThread.start();
+
+      // write data and close to make sure a block is allocated.
+      out.write(new byte[32], 0, 32);
+      out.close();
+
+      // Restart name node so that it replays edit. If old path was
+      // logged in edit, it will fail to come up.
+      cluster.restartNameNode(0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+
+  }
 }