HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken. Contributed by Andy Isaacson

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1384081 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-09-12 18:36:01 +00:00
parent 80a8d57ffc
commit 2ba149f85c
5 changed files with 28 additions and 8 deletions

View File

@@ -783,6 +783,9 @@ Release 2.0.2-alpha - 2012-09-07
HDFS-3833. TestDFSShell fails on windows due to concurrent file
read/write. (Brandon Li via suresh)
HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
(Andy Isaacson via eli)
Release 2.0.0-alpha - 05-23-2012
INCOMPATIBLE CHANGES

View File

@@ -374,7 +374,8 @@ class BlockPoolSliceScanner {
throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
}
private void verifyBlock(ExtendedBlock block) {
@VisibleForTesting
void verifyBlock(ExtendedBlock block) {
BlockSender blockSender = null;
/* In case of failure, attempt to read second time to reduce

View File

@@ -172,7 +172,8 @@ public class DataBlockScanner implements Runnable {
return blockPoolScannerMap.size();
}
private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
@VisibleForTesting
synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
return blockPoolScannerMap.get(bpid);
}

View File

@@ -34,14 +34,19 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
/**
@@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner {
private static Pattern pattern_blockVerify =
Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
static {
((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
}
/**
* This connects to datanode and fetches block verification data.
* It repeats this until the given block has a verification time > newTime.
@@ -206,12 +215,12 @@ public class TestDatanodeBlockScanner {
assertTrue(MiniDFSCluster.corruptReplica(1, block));
assertTrue(MiniDFSCluster.corruptReplica(2, block));
// Read the file to trigger reportBadBlocks by client
try {
IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(),
conf, true);
} catch (IOException e) {
// Ignore exception
// Trigger each of the DNs to scan this block immediately.
// The block pool scanner doesn't run frequently enough on its own
// to notice these, and due to HDFS-1371, the client won't report
// bad blocks to the NN when all replicas are bad.
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.runBlockScannerForBlock(dn, block);
}
// We now have the blocks to be marked as corrupt and we get back all

View File

@@ -114,6 +114,12 @@ public class DataNodeTestUtils {
dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}
/**
 * Forces an immediate scan of the given block on the given datanode:
 * looks up the datanode's block scanner, resolves the per-block-pool
 * scanner for the block's pool id, and verifies the block synchronously.
 * Test-only hook; relies on getBPScanner/verifyBlock being opened up
 * (package-private + @VisibleForTesting) elsewhere in this commit.
 * NOTE(review): no null check on the pool scanner — presumably callers
 * only pass blocks from pools the DN is registered with; an unknown
 * bpid would NPE here.
 */
public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
DataBlockScanner scanner = dn.getBlockScanner();
BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
bpScanner.verifyBlock(b);
}
public static void shutdownBlockScanner(DataNode dn) {
if (dn.blockScanner != null) {
dn.blockScanner.shutdown();