diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 78c4b58c004..af8643d0f7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -616,6 +616,9 @@ Release 2.0.2-alpha - 2012-09-07
 
     HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
 
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 757c0d79c5e..e0582e47600 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -374,7 +374,8 @@ class BlockPoolSliceScanner {
     throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
   }
 
-  private void verifyBlock(ExtendedBlock block) {
+  @VisibleForTesting
+  void verifyBlock(ExtendedBlock block) {
     BlockSender blockSender = null;
 
     /* In case of failure, attempt to read second time to reduce
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
index 4a2b76296b8..95a883a87d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
@@ -172,7 +172,8 @@ public class DataBlockScanner implements Runnable {
     return blockPoolScannerMap.size();
   }
 
-  private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
+  @VisibleForTesting
+  synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
     return blockPoolScannerMap.get(bpid);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
index dfc9e9a667a..a3cfb257312 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
@@ -34,14 +34,19 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.Test;
 
 /**
@@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner {
 
   private static Pattern pattern_blockVerify = 
       Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
+
+  static {
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
+  }
   /**
    * This connects to datanode and fetches block verification data.
    * It repeats this until the given block has a verification time > newTime.
@@ -206,12 +215,12 @@ public class TestDatanodeBlockScanner {
     assertTrue(MiniDFSCluster.corruptReplica(1, block));
     assertTrue(MiniDFSCluster.corruptReplica(2, block));
 
-    // Read the file to trigger reportBadBlocks by client
-    try {
-      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
-                        conf, true);
-    } catch (IOException e) {
-      // Ignore exception
+    // Trigger each of the DNs to scan this block immediately.
+    // The block pool scanner doesn't run frequently enough on its own
+    // to notice these, and due to HDFS-1371, the client won't report
+    // bad blocks to the NN when all replicas are bad.
+    for (DataNode dn : cluster.getDataNodes()) {
+      DataNodeTestUtils.runBlockScannerForBlock(dn, block);
     }
 
     // We now have the blocks to be marked as corrupt and we get back all
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 55b4bf57db6..9a660263fc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -114,6 +114,12 @@ public class DataNodeTestUtils {
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
 
+  public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    DataBlockScanner scanner = dn.getBlockScanner();
+    BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+    bpScanner.verifyBlock(b);
+  }
+
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();