diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80e4213bb8f..a5e725b2c49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -300,6 +300,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread
     holds the write lock (Vinaykumar B via umamahesh)
 
+    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for
+    the same file can cause readers to fail forever (Vinaykumar B via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 90f5687cee1..939700da0d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2331,6 +2331,11 @@ public class DFSClient implements java.io.Closeable {
       throw re.unwrapRemoteException(AccessControlException.class);
     }
   }
+
+  @VisibleForTesting
+  ExtendedBlock getPreviousBlock(String file) {
+    return filesBeingWritten.get(file).getBlock();
+  }
 
   /**
    * enable/disable restore failed storage.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 49ecb268646..25441ff2b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -290,6 +290,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
     if (last != null) {
       if (last.getLocations().length == 0) {
+        if (last.getBlockSize() == 0) {
+          // if the length is zero, then no data has been written to the
+          // datanode, so there is no need to wait for the locations.
+          return 0;
+        }
         return -1;
       }
       final long len = readBlockLength(last);
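The DFSInputStream change above is the reader-side half of the fix: a last block with no reported locations used to make the length lookup return -1 ("unknown, retry"), so readers of a file whose writer died right after addBlock spun forever. The sketch below restates just that decision in isolation; resolveLastBlockLength is a hypothetical name introduced for illustration, while readBlockLength is the real DFSInputStream helper visible in the hunk context.

  // Sketch of the patched length decision (hypothetical helper name).
  // LocatedBlock is org.apache.hadoop.hdfs.protocol.LocatedBlock.
  private long resolveLastBlockLength(LocatedBlock last) throws IOException {
    if (last.getLocations().length == 0) {
      // No datanode holds a replica of this block yet.
      if (last.getBlockSize() == 0) {
        // The writer never pushed a byte into a pipeline; the block is
        // known to be empty, so report 0 instead of "unknown".
        return 0;
      }
      // Bytes were written but no location is known: length unknown (-1),
      // so the caller keeps retrying.
      return -1;
    }
    // Otherwise ask a datanode for the replica's visible length.
    return readBlockLength(last);
  }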
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index b83c573185e..c51da3050ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1717,8 +1717,9 @@ public class DFSOutputStream extends FSOutputSummer
       } // end synchronized
 
       waitForAckedSeqno(toWaitFor);
-
-      if (updateLength) {
+
+      // update the block length the first time, irrespective of the flag
+      if (updateLength || persistBlocks.get()) {
         synchronized (this) {
           if (streamer != null && streamer.block != null) {
             lastBlockLength = streamer.block.getNumBytes();
@@ -1984,4 +1985,14 @@ public class DFSOutputStream extends FSOutputSummer
   public void setDropBehind(Boolean dropBehind) throws IOException {
     this.cachingStrategy.setDropBehind(dropBehind);
   }
+
+  @VisibleForTesting
+  ExtendedBlock getBlock() {
+    return streamer.getBlock();
+  }
+
+  @VisibleForTesting
+  long getFileId() {
+    return fileId;
+  }
 }
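The DFSOutputStream change makes flushOrSync() record the acked block length on every hflush() while blocks are being persisted (persistBlocks is the stream's existing flag for that mode, e.g. under HA), not only when the caller passes SyncFlag.UPDATE_LENGTH. A hedged usage sketch of the visible effect; the configuration, path, and payload are invented for illustration:

  // Illustrative client-side effect of the flushOrSync() change
  // (conf, path, and payload are assumptions, not from the patch).
  FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream out = fs.create(new Path("/example/app.log"));
  out.write("record-1".getBytes("UTF-8"));
  // Before the fix only hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)) made the
  // stream capture the acked block length; now a plain hflush() captures it
  // too once blocks are being persisted, so a restarted or standby NN can
  // hand readers a usable length for the last block.
  out.hflush();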
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 73b8c2473cf..6b37c4812af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3684,6 +3684,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (uc.getNumExpectedLocations() == 0) {
       uc.setExpectedLocations(blockManager.getNodes(lastBlock));
     }
+
+    if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
+      // No datanode has reported in for this block.
+      // The client may have crashed before writing data to the pipeline.
+      // This block doesn't need any recovery.
+      // We can remove it and close the file.
+      pendingFile.removeLastBlock(lastBlock);
+      finalizeINodeFileUnderConstruction(src, pendingFile,
+          iip.getLatestSnapshot());
+      NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
+          + "Removed empty last block and closed file.");
+      return true;
+    }
     // start recovery of the last block for this file
     long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
     lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
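The FSNamesystem hunk is the core of the fix on the NameNode side. Block recovery is driven by a primary datanode chosen from the block's expected locations; a last block that no datanode ever received has no candidates, so internalReleaseLease() previously kicked off a recovery that could never finish and the file stayed open indefinitely. A condensed paraphrase of the new decision (names as in the hunk, control flow simplified):

  // Condensed paraphrase of the internalReleaseLease() logic above:
  //   expectedLocations == 0 && numBytes == 0
  //     -> client died right after addBlock(); no datanode ever saw the
  //        block, so drop it and finalize the file (the new branch).
  //   anything else
  //     -> fall through to ordinary last-block recovery.
  if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
    pendingFile.removeLastBlock(lastBlock);
    finalizeINodeFileUnderConstruction(src, pendingFile,
        iip.getLatestSnapshot());
    return true; // lease released, file closed
  }
  long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));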
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
index 7007f12a18a..5d367576091 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public class DFSClientAdapter {
@@ -43,4 +44,21 @@ public class DFSClientAdapter {
       String src, long start, long length) throws IOException {
     return DFSClient.callGetBlockLocations(namenode, src, start, length);
   }
+
+  public static ClientProtocol getNamenode(DFSClient client) throws IOException {
+    return client.namenode;
+  }
+
+  public static DFSClient getClient(DistributedFileSystem dfs)
+      throws IOException {
+    return dfs.dfs;
+  }
+
+  public static ExtendedBlock getPreviousBlock(DFSClient client, String file) {
+    return client.getPreviousBlock(file);
+  }
+
+  public static long getFileId(DFSOutputStream out) {
+    return out.getFileId();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index a90da870d0d..b9150dad28c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -175,7 +175,7 @@ public class TestPersistBlocks {
       // This would mean that blocks were successfully persisted to the log
       FileStatus status = fs.getFileStatus(FILE_PATH);
       assertTrue("Length incorrect: " + status.getLen(),
-          status.getLen() != len - BLOCK_SIZE);
+          status.getLen() == len - BLOCK_SIZE);
 
       // Verify the data showed up from before restart, sans abandoned block.
       FSDataInputStream readStream = fs.open(FILE_PATH);
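The two test-side diffs above are plumbing for the new behavior. DFSClientAdapter gains package-local accessors (DFSClient.namenode, DistributedFileSystem.dfs, and the new getPreviousBlock/getFileId hooks) so tests outside org.apache.hadoop.hdfs can stage a writer that obtained a block from the NN but never built a pipeline, and TestPersistBlocks can now assert the exact post-restart length (== len - BLOCK_SIZE) because the empty abandoned block is removed rather than left dangling. A hypothetical fragment using the hooks, which the HA test below performs in full (dfs and out stand for an existing DistributedFileSystem and DFSOutputStream):

  // Hypothetical test fragment: simulate a writer that got a block from
  // the NN but crashed before writing to any datanode.
  DFSClient client = DFSClientAdapter.getClient(dfs);
  ExtendedBlock prev = DFSClientAdapter.getPreviousBlock(client, "/f");
  DFSClientAdapter.getNamenode(client).addBlock("/f",
      client.getClientName(), new ExtendedBlock(prev),
      new DatanodeInfo[0], // no pipeline is ever constructed
      DFSClientAdapter.getFileId(out), null);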
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index fd548a9422e..0c95764eba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -34,16 +34,23 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
@@ -766,4 +773,50 @@ public class TestHASafeMode {
     assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
   }
 
+  /** Test NN crash and client crash/stuck immediately after block allocation */
+  @Test(timeout = 100000)
+  public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
+    cluster.getConfiguration(0).set(
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
+    String testData = "testData";
+    // make sure we write a full block before creating the dummy block at the NN.
+    cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
+        testData.length());
+    cluster.restartNameNode(0);
+    try {
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+      cluster.transitionToStandby(1);
+      DistributedFileSystem dfs = cluster.getFileSystem(0);
+      String pathString = "/tmp1.txt";
+      Path filePath = new Path(pathString);
+      FSDataOutputStream create = dfs.create(filePath,
+          FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
+          null);
+      create.write(testData.getBytes());
+      create.hflush();
+      DFSClient client = DFSClientAdapter.getClient(dfs);
+      // add one dummy block at the NN, but do not write it to any DataNode
+      ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client,
+          pathString);
+      DFSClientAdapter.getNamenode(client).addBlock(
+          pathString,
+          client.getClientName(),
+          new ExtendedBlock(previousBlock),
+          new DatanodeInfo[0],
+          DFSClientAdapter.getFileId((DFSOutputStream) create
+              .getWrappedStream()), null);
+      cluster.restartNameNode(0, true);
+      cluster.restartDataNode(0);
+      cluster.transitionToActive(0);
+      // let the block reports be processed.
+      Thread.sleep(2000);
+      FSDataInputStream is = dfs.open(filePath);
+      is.close();
+      dfs.recoverLease(filePath); // initiate recovery
+      assertTrue("Recovery should also succeed", dfs.recoverLease(filePath));
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
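End to end, the test drives exactly the HDFS-4516 scenario: hflush() a full first block, register a dummy second block at the NN with no datanode pipeline, restart the NN while the client is abandoned, fail over, and confirm that neither readers nor lease recovery get stuck. A hedged sketch of the same check from a plain client (path and byte count taken from the test; IOUtils is org.apache.hadoop.io.IOUtils and assertEquals is the JUnit assertion):

  // What the fix guarantees after the failover above: the reopened file
  // exposes the hflushed bytes, instead of readers hanging forever on a
  // last block that no datanode ever received.
  FSDataInputStream in = dfs.open(new Path("/tmp1.txt"));
  byte[] buf = new byte[8]; // "testData".length() == 8
  IOUtils.readFully(in, buf, 0, buf.length); // returns instead of spinning
  in.close();
  assertEquals("testData", new String(buf, "UTF-8"));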