HDFS-4878. On Remove Block, block is not removed from neededReplications queue. Contributed by Tao Luo.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1491671 13f79535-47bb-0310-9956-ffa450edef68
commit 8e0f166cf3
parent 5ab67b5c07
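The one-line summary above is the whole story of the fix: when the NameNode removes a block (for example on file delete), BlockManager cleared it from pendingReplications but left it in neededReplications, so the replication queues could keep tracking, and metasave keep reporting, a block that no longer exists. Below is a minimal sketch of the corrected removal path, assuming the enclosing method is BlockManager#removeBlock(Block) and omitting its surrounding bookkeeping; the BlockManager.java hunk further down is the authoritative change.

  // Sketch only, not the verbatim method: the fields are real BlockManager
  // members visible in the diff below, but logging and other accounting
  // around them is omitted.
  public void removeBlock(Block block) {
    addToInvalidates(block);                  // ask DataNodes to delete their replicas
    corruptReplicas.removeFromCorruptReplicasMap(block);
    blocksMap.removeBlock(block);             // forget the block entirely
    // The fix: clear the block from BOTH replication queues, not just pending.
    pendingReplications.remove(block);
    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
    if (postponedMisreplicatedBlocks.remove(block)) {
      postponedMisreplicatedBlocksCount.decrementAndGet();
    }
  }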
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -628,6 +628,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4661. A few little code cleanups of some HDFS-347-related code. (Colin
     Patrick McCabe via atm)
 
+    HDFS-4878. On Remove Block, block is not removed from neededReplications
+    queue. (Tao Luo via shv)
+
   BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS
 
     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -2869,8 +2869,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
-    // Remove the block from pendingReplications
+    // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
+    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
     if (postponedMisreplicatedBlocks.remove(block)) {
       postponedMisreplicatedBlocksCount.decrementAndGet();
     }
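A note on the new call: the second argument to neededReplications.remove is a priority level, and UnderReplicatedBlocks.LEVEL is the number of priority queues, i.e. an out-of-range value. Passing it forces the removal to scan every queue, which is what this call site needs because it does not know which priority the block was filed under. A condensed sketch of that behavior, assuming field and constant names as in UnderReplicatedBlocks (not the verbatim implementation):

  // Remove 'block' from the under-replication queues. 'priLevel' is a hint;
  // LEVEL (== number of queues) means "unknown, search everywhere".
  boolean remove(Block block, int priLevel) {
    if (priLevel >= 0 && priLevel < LEVEL
        && priorityQueues.get(priLevel).remove(block)) {
      return true;                        // fast path: the hint was correct
    }
    // Slow path (what passing LEVEL triggers): the block may sit at any
    // priority, so try each queue until it is found or all are exhausted.
    for (int level = 0; level < LEVEL; level++) {
      if (level != priLevel && priorityQueues.get(level).remove(block)) {
        return true;
      }
    }
    return false;
  }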
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java

@@ -87,7 +87,9 @@ public class TestMetaSave {
         + "metasave.out.txt";
     FileInputStream fstream = new FileInputStream(logFile);
     DataInputStream in = new DataInputStream(fstream);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+    BufferedReader reader = null;
+    try {
+      reader = new BufferedReader(new InputStreamReader(in));
     String line = reader.readLine();
     assertTrue(line.equals(
         "3 files and directories, 2 blocks = 5 total filesystem objects"));
@@ -98,6 +100,59 @@ public class TestMetaSave {
     line = reader.readLine();
     line = reader.readLine();
     assertTrue(line.matches("^/filestatus[01]:.*"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
+  }
+
+  /**
+   * Tests metasave after delete, to make sure there are no orphaned blocks
+   */
+  @Test
+  public void testMetasaveAfterDelete()
+      throws IOException, InterruptedException {
+
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    for (int i = 0; i < 2; i++) {
+      Path file = new Path("/filestatus" + i);
+      DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2,
+          seed);
+    }
+
+    cluster.stopDataNode(1);
+    // wait for namenode to discover that a datanode is dead
+    Thread.sleep(15000);
+    namesystem.setReplication("/filestatus0", (short) 4);
+    namesystem.delete("/filestatus0", true);
+    namesystem.delete("/filestatus1", true);
+
+    namesystem.metaSave("metasaveAfterDelete.out.txt");
+
+    // Verification
+    String logFile = System.getProperty("hadoop.log.dir") + "/"
+        + "metasaveAfterDelete.out.txt";
+    BufferedReader reader = null;
+    try {
+      FileInputStream fstream = new FileInputStream(logFile);
+      DataInputStream in = new DataInputStream(fstream);
+      reader = new BufferedReader(new InputStreamReader(in));
+      reader.readLine();
+      String line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
   }
 
   @AfterClass
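Two design notes on the new test, as far as the diff shows. First, stopping a DataNode and raising the replication factor of /filestatus0 to 4 pushes its blocks into neededReplications before the delete, so without the BlockManager fix the "Metasave: Blocks waiting for replication: 0" assertion would fail on the stale entries. Second, the try/finally around the reader mirrors the refactoring in the earlier hunk: assertTrue throws on failure, and the finally keeps the metasave output file's stream from leaking when an assertion trips. (The 15-second Thread.sleep is presumably sized to the test cluster's heartbeat and recheck settings, which this diff does not show.)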