HDFS-4878. On Remove Block, block is not removed from neededReplications queue. Contributed by Tao Luo.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1491672 13f79535-47bb-0310-9956-ffa450edef68
parent e395e29279
commit 2a05c63004
@@ -354,6 +354,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails with all directories
     in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)
 
+    HDFS-4878. On Remove Block, block is not removed from neededReplications
+    queue. (Tao Luo via shv)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
@@ -2869,8 +2869,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
-    // Remove the block from pendingReplications
+    // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
+    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
     if (postponedMisreplicatedBlocks.remove(block)) {
       postponedMisreplicatedBlocksCount.decrementAndGet();
     }
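Why the new line passes UnderReplicatedBlocks.LEVEL: the caller in removeBlock does not know which priority queue currently holds the block, and LEVEL acts as a sentinel that makes the remove fall back to scanning every queue. A minimal sketch of that pattern, with illustrative names rather than the actual Hadoop classes:

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Sketch of a leveled queue set in the spirit of UnderReplicatedBlocks;
// names and structure are illustrative, not the Hadoop implementation.
class LeveledQueues<T> {
  static final int LEVEL = 5;                 // number of priority levels
  private final List<Set<T>> queues = new ArrayList<Set<T>>();

  LeveledQueues() {
    for (int i = 0; i < LEVEL; i++) {
      queues.add(new LinkedHashSet<T>());
    }
  }

  void add(T item, int priority) {
    queues.get(priority).add(item);
  }

  // Remove item at the given priority; passing LEVEL (or a stale hint)
  // falls back to scanning every queue, which is what a caller that
  // does not track the block's priority needs.
  boolean remove(T item, int priority) {
    if (priority >= 0 && priority < LEVEL && queues.get(priority).remove(item)) {
      return true;
    }
    for (Set<T> queue : queues) {
      if (queue.remove(item)) {
        return true;
      }
    }
    return false;
  }
}

Without a removal like this, a block deleted while queued for replication would linger in neededReplications, which is exactly the orphaned-block symptom the new test below checks for.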
@@ -99,9 +99,12 @@ public class TestMetaSave {
         + "metasave.out.txt";
     FileInputStream fstream = new FileInputStream(logFile);
     DataInputStream in = new DataInputStream(fstream);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
-    String line = reader.readLine();
-    assertTrue(line.equals("3 files and directories, 2 blocks = 5 total"));
-    line = reader.readLine();
-    assertTrue(line.equals("Live Datanodes: 1"));
-    line = reader.readLine();
+    BufferedReader reader = null;
+    try {
+      reader = new BufferedReader(new InputStreamReader(in));
+      String line = reader.readLine();
+      assertTrue(line.equals(
+          "3 files and directories, 2 blocks = 5 total"));
+      line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
@@ -109,6 +112,58 @@ public class TestMetaSave {
-    line = reader.readLine();
-    line = reader.readLine();
-    assertTrue(line.matches("^/filestatus[01]:.*"));
+      line = reader.readLine();
+      line = reader.readLine();
+      assertTrue(line.matches("^/filestatus[01]:.*"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
+  }
+
+  /**
+   * Tests metasave after delete, to make sure there are no orphaned blocks
+   */
+  @Test
+  public void testMetasaveAfterDelete()
+      throws IOException, InterruptedException {
+
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    for (int i = 0; i < 2; i++) {
+      Path file = new Path("/filestatus" + i);
+      createFile(fileSys, file);
+    }
+
+    cluster.stopDataNode(1);
+    // wait for namenode to discover that a datanode is dead
+    Thread.sleep(15000);
+    namesystem.setReplication("/filestatus0", (short) 4);
+    namesystem.delete("/filestatus0", true);
+    namesystem.delete("/filestatus1", true);
+
+    namesystem.metaSave("metasaveAfterDelete.out.txt");
+
+    // Verification
+    String logFile = System.getProperty("hadoop.log.dir") + "/"
+        + "metasaveAfterDelete.out.txt";
+    BufferedReader reader = null;
+    try {
+      FileInputStream fstream = new FileInputStream(logFile);
+      DataInputStream in = new DataInputStream(fstream);
+      reader = new BufferedReader(new InputStreamReader(in));
+      reader.readLine();
+      String line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
   }
 
   @AfterClass
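Two notes on the test changes above. The reader cleanup uses the pre-Java-7 try/finally idiom; on Java 7+ a try-with-resources statement gives the same close guarantee more compactly. And the fixed Thread.sleep(15000) wait for dead-datanode detection is a classic source of slow or flaky tests; a bounded poll is a common alternative. A minimal self-contained sketch, where the dead-node check plugged into it is a hypothetical stand-in, not the test's actual API:

import java.util.concurrent.TimeoutException;

final class WaitUtil {
  interface Check { boolean ok(); }            // Java 6-compatible predicate

  // Poll the check every intervalMs until it passes, or fail after timeoutMs.
  static void waitFor(Check check, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!check.ok()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }
}

Hypothetical usage in place of the fixed sleep, assuming some isDatanodeDead() helper:
  WaitUtil.waitFor(new WaitUtil.Check() {
    public boolean ok() { return isDatanodeDead(); }
  }, 500, 30000);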