From 2a05c630044a18af10dd8c74db394f23581e35a9 Mon Sep 17 00:00:00 2001
From: Konstantin Shvachko
Date: Tue, 11 Jun 2013 01:40:52 +0000
Subject: [PATCH] HDFS-4878. On Remove Block, block is not removed from
 neededReplications queue. Contributed by Tao Luo.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1491672 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../server/blockmanagement/BlockManager.java  |  3 +-
 .../hdfs/server/namenode/TestMetaSave.java    | 75 ++++++++++++++++---
 3 files changed, 70 insertions(+), 11 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9203345400..863a885caf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -354,6 +354,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails with all directories
     in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)
 
+    HDFS-4878. On Remove Block, block is not removed from neededReplications
+    queue. (Tao Luo via shv)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4e082b702bc..b904ce36f4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2869,8 +2869,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
-    // Remove the block from pendingReplications
+    // Remove the block from pendingReplications and neededReplications
     pendingReplications.remove(block);
+    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
     if (postponedMisreplicatedBlocks.remove(block)) {
       postponedMisreplicatedBlocksCount.decrementAndGet();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 2f2b6888fca..729358562b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -99,16 +99,71 @@ public class TestMetaSave {
         + "metasave.out.txt";
     FileInputStream fstream = new FileInputStream(logFile);
     DataInputStream in = new DataInputStream(fstream);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
-    String line = reader.readLine();
-    assertTrue(line.equals("3 files and directories, 2 blocks = 5 total"));
-    line = reader.readLine();
-    assertTrue(line.equals("Live Datanodes: 1"));
-    line = reader.readLine();
-    assertTrue(line.equals("Dead Datanodes: 1"));
-    line = reader.readLine();
-    line = reader.readLine();
-    assertTrue(line.matches("^/filestatus[01]:.*"));
+    BufferedReader reader = null;
+    try {
+      reader = new BufferedReader(new InputStreamReader(in));
+      String line = reader.readLine();
+      assertTrue(line.equals(
+          "3 files and directories, 2 blocks = 5 total"));
+      line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      line = reader.readLine();
+      assertTrue(line.matches("^/filestatus[01]:.*"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
+  }
+
+  /**
+   * Tests metasave after delete, to make sure there are no orphaned blocks
+   */
+  @Test
+  public void testMetasaveAfterDelete()
+      throws IOException, InterruptedException {
+
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    for (int i = 0; i < 2; i++) {
+      Path file = new Path("/filestatus" + i);
+      createFile(fileSys, file);
+    }
+
+    cluster.stopDataNode(1);
+    // wait for namenode to discover that a datanode is dead
+    Thread.sleep(15000);
+    namesystem.setReplication("/filestatus0", (short) 4);
+    namesystem.delete("/filestatus0", true);
+    namesystem.delete("/filestatus1", true);
+
+    namesystem.metaSave("metasaveAfterDelete.out.txt");
+
+    // Verification
+    String logFile = System.getProperty("hadoop.log.dir") + "/"
+        + "metasaveAfterDelete.out.txt";
+    BufferedReader reader = null;
+    try {
+      FileInputStream fstream = new FileInputStream(logFile);
+      DataInputStream in = new DataInputStream(fstream);
+      reader = new BufferedReader(new InputStreamReader(in));
+      reader.readLine();
+      String line = reader.readLine();
+      assertTrue(line.equals("Live Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Dead Datanodes: 1"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+    } finally {
+      if (reader != null)
+        reader.close();
+    }
   }
 
   @AfterClass
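
Note on the bug this patch fixes: BlockManager.removeBlock() purged a deleted block from blocksMap and pendingReplications but not from the neededReplications (under-replicated) queue, so the queue kept an orphaned entry for a block that no longer existed, which metasave then reported. The sketch below illustrates that invariant only; it uses hypothetical simplified types (BlockCleanupSketch, plain long block IDs), not Hadoop's actual classes.

    // Minimal sketch, NOT Hadoop code: stand-ins for the structures that
    // removeBlock() must keep mutually consistent.
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class BlockCleanupSketch {
      private final Set<Long> blocksMap = new HashSet<>();           // all known blocks
      private final Set<Long> pendingReplications = new HashSet<>(); // replication in flight
      private final Map<Long, Integer> neededReplications =
          new HashMap<>();                                           // under-replicated queue

      void markUnderReplicated(long blockId, int priority) {
        blocksMap.add(blockId);
        neededReplications.put(blockId, priority);
      }

      void removeBlock(long blockId) {
        blocksMap.remove(blockId);
        pendingReplications.remove(blockId);
        // Without this removal the entry is orphaned: the block is gone from
        // blocksMap, yet the queue still schedules replication work for it
        // (the counterpart of the neededReplications.remove() line the patch adds).
        neededReplications.remove(blockId);
      }
    }

In the actual fix the queue is bucketed by priority, which is why the diff passes UnderReplicatedBlocks.LEVEL to neededReplications.remove(block, ...): it asks the queue to search every priority level for the block rather than a single known one.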