HDFS-5672. Merge r1581994 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1581996 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-03-26 18:38:14 +00:00
parent 3e9e036099
commit c87fbeb7a8
3 changed files with 33 additions and 10 deletions


@@ -467,6 +467,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-6115. Call flush() for every append on block scan verification log.
     (Vinayakumar B via szetszwo)
 
+    HDFS-5672. TestHASafeMode#testSafeBlockTracking fails in trunk. (jing9)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)


@@ -1877,8 +1877,9 @@ public class BlockManager {
     int headIndex = 0; //currently the delimiter is in the head of the list
     int curIndex;
 
-    if (newReport == null)
+    if (newReport == null) {
       newReport = new BlockListAsLongs();
+    }
     // scan the report and process newly reported blocks
     BlockReportIterator itBR = newReport.getBlockReportIterator();
     while(itBR.hasNext()) {
@@ -1971,9 +1972,11 @@ public class BlockManager {
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(dn.getDatanodeUuid(), block)) {
-      /* TODO: following assertion is incorrect, see HDFS-2668
-      assert storedBlock.findDatanode(dn) < 0 : "Block " + block
-          + " in recentInvalidatesSet should not appear in DN " + dn; */
+      /*
+       * TODO: following assertion is incorrect, see HDFS-2668 assert
+       * storedBlock.findDatanode(dn) < 0 : "Block " + block +
+       * " in recentInvalidatesSet should not appear in DN " + dn;
+       */
       return storedBlock;
     }
@@ -1993,8 +1996,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
 
     if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-      toUC.add(new StatefulBlockInfo(
-          (BlockInfoUnderConstruction)storedBlock, block, reportedState));
+      toUC.add(new StatefulBlockInfo((BlockInfoUnderConstruction) storedBlock,
+          new Block(block), reportedState));
       return storedBlock;
     }
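The hunk above carries the core of the HDFS-5672 change: the reported block is wrapped in new Block(block) before being queued as a StatefulBlockInfo, instead of queuing the reported reference directly. A plausible reading, and it is only a reading since the block-report iterator internals are not shown in this diff, is that this takes a defensive copy of a mutable object that the surrounding iteration may keep reusing. The sketch below is not HDFS code; DefensiveCopySketch and MutableBlock are invented names that only illustrate that general pattern.

// Self-contained sketch (not HDFS code): why copying a reused mutable
// object before queueing it matters. All names here are invented.
import java.util.ArrayList;
import java.util.List;

public class DefensiveCopySketch {
  /** Stand-in for a mutable block descriptor that an iterator might reuse. */
  static class MutableBlock {
    long id;
    MutableBlock(long id) { this.id = id; }
    MutableBlock(MutableBlock other) { this.id = other.id; } // copy constructor
    @Override public String toString() { return "blk_" + id; }
  }

  public static void main(String[] args) {
    MutableBlock reused = new MutableBlock(0);      // one instance, mutated per entry
    List<MutableBlock> aliased = new ArrayList<>();
    List<MutableBlock> copied  = new ArrayList<>();

    for (long id = 1; id <= 3; id++) {
      reused.id = id;                               // iterator-style reuse
      aliased.add(reused);                          // every element aliases the same object
      copied.add(new MutableBlock(reused));         // snapshot of the current value
    }

    System.out.println("aliased: " + aliased);      // [blk_3, blk_3, blk_3]
    System.out.println("copied:  " + copied);       // [blk_1, blk_2, blk_3]
  }
}

Running the sketch prints the same last value three times for the aliased list and the three distinct values for the copied one, which is the difference a copy such as new Block(block) makes.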
@@ -2878,7 +2881,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       // about new storages from heartbeats but during NN restart we may
       // receive a block report or incremental report before the heartbeat.
       // We must handle this for protocol compatibility. This issue was
-      // uncovered by HDFS-6904.
+      // uncovered by HDFS-6094.
       node.updateStorage(srdb.getStorage());
     }


@@ -593,6 +593,15 @@ public class TestHASafeMode {
     assertSafeMode(nn1, 0, 0, 3, 0);
   }
 
+  @Test
+  public void testSafeBlockTracking() throws Exception {
+    testSafeBlockTracking(false);
+  }
+
+  @Test
+  public void testSafeBlockTracking2() throws Exception {
+    testSafeBlockTracking(true);
+  }
 
   /**
    * Test that the number of safe blocks is accounted correctly even when
@@ -600,9 +609,15 @@ public class TestHASafeMode {
    * If a FINALIZED report arrives at the SBN before the block is marked
    * COMPLETE, then when we get the OP_CLOSE we need to count it as "safe"
    * at that point. This is a regression test for HDFS-2742.
+   *
+   * @param noFirstBlockReport If this is set to true, we shutdown NN1 before
+   * closing the writing streams. In this way, when NN1 restarts, all DNs will
+   * first send it incremental block report before the first full block report.
+   * And NN1 will not treat the full block report as the first block report
+   * in BlockManager#processReport.
    */
-  @Test
-  public void testSafeBlockTracking() throws Exception {
+  private void testSafeBlockTracking(boolean noFirstBlockReport)
+      throws Exception {
     banner("Starting with NN0 active and NN1 standby, creating some " +
         "UC blocks plus some other blocks to force safemode");
     DFSTestUtil.createFile(fs, new Path("/other-blocks"), 10*BLOCK_SIZE, (short) 3, 1L);
@@ -619,6 +634,9 @@ public class TestHASafeMode {
       // the namespace during startup and enter safemode.
       nn0.getRpcServer().rollEditLog();
     } finally {
+      if (noFirstBlockReport) {
+        cluster.shutdownNameNode(1);
+      }
       for (FSDataOutputStream stm : stms) {
         IOUtils.closeStream(stm);
       }
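The three TestHASafeMode hunks above turn the original @Test method into a boolean-parameterized helper, and the new Javadoc explains the second variant: shutting down NN1 before the output streams are closed means that, once NN1 restarts, the DataNodes send incremental block reports ahead of their first full report, so BlockManager#processReport no longer treats that full report as the first one. The toy program below only illustrates that ordering idea; FirstReportOrdering, NameNodeView, and the zero-known-blocks rule for "first report" are invented for illustration and do not model the real BlockManager bookkeeping.

// Toy ordering illustration (not HDFS code): whether a full block report is
// handled as the "first" one here depends only on whether the storage is
// already known, e.g. because an incremental report arrived earlier.
import java.util.HashMap;
import java.util.Map;

public class FirstReportOrdering {
  /** Invented stand-in for the NameNode's per-storage bookkeeping. */
  static class NameNodeView {
    private final Map<String, Integer> blocksPerStorage = new HashMap<>();

    void incrementalReport(String storageId, int newBlocks) {
      blocksPerStorage.merge(storageId, newBlocks, Integer::sum);
    }

    /** Returns true when the full report would take the "first report" path. */
    boolean fullReportIsFirst(String storageId) {
      return blocksPerStorage.getOrDefault(storageId, 0) == 0;
    }
  }

  public static void main(String[] args) {
    NameNodeView restartedBeforeClose = new NameNodeView();
    // Streams were still open when the NN restarted, so incremental reports
    // for the newly received blocks arrive before the full report.
    restartedBeforeClose.incrementalReport("DS-1", 3);
    System.out.println("treated as first? "
        + restartedBeforeClose.fullReportIsFirst("DS-1"));   // false

    NameNodeView freshNameNode = new NameNodeView();
    System.out.println("treated as first? "
        + freshNameNode.fullReportIsFirst("DS-1"));          // true
  }
}

In the toy, the view that has already absorbed an incremental report no longer answers true for the full report, mirroring the path the tests exercise when noFirstBlockReport is true.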