HDFS-5257. Merge change r1535811 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1535813 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-10-25 18:46:57 +00:00
parent c86c303a62
commit 9c78f870d8
4 changed files with 52 additions and 5 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -140,6 +140,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5400. DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set
     to the wrong value. (Colin Patrick McCabe)
 
+    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client
+    will get AIOBE. (Vinay via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -1102,6 +1102,11 @@ public class DFSOutputStream extends FSOutputSummer
     //
     private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
         boolean recoveryFlag) {
+      if (nodes.length == 0) {
+        DFSClient.LOG.info("nodes are empty for write pipeline of block "
+            + block);
+        return false;
+      }
       Status pipelineStatus = SUCCESS;
       String firstBadLink = "";
       if (DFSClient.LOG.isDebugEnabled()) {
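
Why the guard matters: a LocatedBlock returned by a retried addBlock() used to carry no locations, and the streamer then indexed into the empty nodes array, which is presumably the ArrayIndexOutOfBoundsException the changelog entry refers to. Below is a minimal, self-contained sketch of that failure mode; it is not part of the commit, and DatanodeStub plus the simplified createBlockOutputStream() are made-up stand-ins for the real DFSClient pipeline code.

// Illustrative only: shows how an empty write pipeline turns into an AIOBE
// without the length check, and into a clean "false" with it.
public class EmptyPipelineSketch {

  static class DatanodeStub {
    final String name;
    DatanodeStub(String name) { this.name = name; }
  }

  // Simplified analogue of createBlockOutputStream(): returns true on success.
  static boolean createBlockOutputStream(DatanodeStub[] nodes) {
    if (nodes.length == 0) {          // the guard added by this patch
      System.out.println("nodes are empty for write pipeline of block b1");
      return false;
    }
    // Without the guard above, nodes[0] on an empty array throws AIOBE here.
    System.out.println("connecting to first datanode " + nodes[0].name);
    return true;
  }

  public static void main(String[] args) {
    createBlockOutputStream(new DatanodeStub[0]);                          // empty locations
    createBlockOutputStream(new DatanodeStub[] { new DatanodeStub("dn1:50010") });
  }
}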

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2490,8 +2490,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final INodeFileUnderConstruction pendingFile =
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
 
-      if(onRetryBlock[0] != null) {
-        // This is a retry. Just return the last block.
+      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
+        // This is a retry. Just return the last block if having locations.
         return onRetryBlock[0];
       }
       if (pendingFile.getBlocks().length >= maxBlocksPerFile) {
@@ -2529,8 +2529,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           (INodeFileUnderConstruction) inodes[inodes.length - 1];
 
       if (onRetryBlock[0] != null) {
-        // This is a retry. Just return the last block.
-        return onRetryBlock[0];
+        if (onRetryBlock[0].getLocations().length > 0) {
+          // This is a retry. Just return the last block if having locations.
+          return onRetryBlock[0];
+        } else {
+          // add new chosen targets to already allocated block and return
+          BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+          ((BlockInfoUnderConstruction) lastBlockInFile)
+              .setExpectedLocations(targets);
+          offset = pendingFile.computeFileSize();
+          return makeLocatedBlock(lastBlockInFile, targets, offset);
+        }
       }
 
       // commit the last block and complete it if it has minimum replicas
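
The NameNode never persists block locations (the new test below is built on exactly that), so after a restart the remembered onRetryBlock[0] can come back with an empty locations array. The following self-contained sketch, not part of the commit, models the decision the new branch makes: hand back the remembered reply only if it still has locations, otherwise keep the same block but attach freshly chosen targets. Plain String arrays stand in for the real location types, and answerRetry() is a hypothetical name, not an HDFS method.

import java.util.Arrays;

// Simplified, hypothetical model of the retry branch in FSNamesystem above.
public class AddBlockRetrySketch {

  static String[] answerRetry(String[] rememberedLocations, String[] freshTargets) {
    if (rememberedLocations.length > 0) {
      // The earlier reply still carries locations: return it unchanged.
      return rememberedLocations;
    }
    // Locations were lost (e.g. NameNode restart): keep the same block but
    // attach newly chosen targets before answering the client.
    return freshTargets;
  }

  public static void main(String[] args) {
    String[] lostLocations = new String[0];
    String[] freshTargets = { "dn1:50010", "dn2:50010", "dn3:50010" };
    System.out.println(Arrays.toString(answerRetry(lostLocations, freshTargets)));
  }
}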

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
@@ -139,4 +140,33 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
   }
+
+  /*
+   * Since NameNode will not persist any locations of the block, addBlock()
+   * retry call after restart NN should re-select the locations and return to
+   * client. refer HDFS-5257
+   */
+  @Test
+  public void testAddBlockRetryShouldReturnBlockWithLocations()
+      throws Exception {
+    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
+    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
+    // create file
+    nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 3, 1024);
+    // start first addBlock()
+    LOG.info("Starting first addBlock for " + src);
+    LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertTrue("Block locations should be present",
+        lb1.getLocations().length > 0);
+
+    cluster.restartNameNode();
+    nameNodeRpc = cluster.getNameNodeRpc();
+    LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
+    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
+  }
 }
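
To exercise just the new test locally, standard Maven Surefire test selection should be enough, e.g. mvn test -Dtest=TestAddBlockRetry run from the hadoop-hdfs-project/hadoop-hdfs module (assuming the usual branch-2 Maven build).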