HBASE-2767. Fix reflection in tests that was made incompatible by HDFS-1209

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@957112 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2010-06-23 05:26:38 +00:00
parent 77fcd6cb63
commit f81be02be8
3 changed files with 25 additions and 25 deletions

CHANGES.txt

@@ -412,6 +412,7 @@ Release 0.21.0 - Unreleased
    HBASE-2763 Cross-port HADOOP-6833 IPC parameter leak bug
    HBASE-2758 META region stuck in RS2ZK_REGION_OPENED state
               (Karthik Ranganathan via jgray)
+   HBASE-2767 Fix reflection in tests that was made incompatible by HDFS-1209
 
   IMPROVEMENTS
    HBASE-1760 Cleanup TODOs in HTable

HBaseTestingUtility.java

@@ -73,7 +73,7 @@ import com.google.common.base.Preconditions;
  * logging levels nor make changes to configuration parameters.
  */
 public class HBaseTestingUtility {
-  private final Log LOG = LogFactory.getLog(getClass());
+  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private final Configuration conf;
   private MiniZooKeeperCluster zkCluster = null;
   private MiniDFSCluster dfsCluster = null;
@@ -888,7 +888,7 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * Set maxRecoveryErrorCount in DFSClient. Currently its hard-coded to 5 and
+   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and
    * makes tests linger. Here is the exception you'll see:
    * <pre>
    * 2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683 failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
@@ -901,20 +901,23 @@ public class HBaseTestingUtility {
    * @throws IllegalArgumentException
    */
   public static void setMaxRecoveryErrorCount(final OutputStream stream,
-      final int max)
-  throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
-    Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
-    for (Class<?> clazz: clazzes) {
-      String className = clazz.getSimpleName();
-      if (className.equals("DFSOutputStream")) {
-        if (clazz.isInstance(stream)) {
-          Field maxRecoveryErrorCountField =
-            stream.getClass().getDeclaredField("maxRecoveryErrorCount");
-          maxRecoveryErrorCountField.setAccessible(true);
-          maxRecoveryErrorCountField.setInt(stream, max);
-          break;
+      final int max) {
+    try {
+      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
+      for (Class<?> clazz: clazzes) {
+        String className = clazz.getSimpleName();
+        if (className.equals("DFSOutputStream")) {
+          if (clazz.isInstance(stream)) {
+            Field maxRecoveryErrorCountField =
+              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
+            maxRecoveryErrorCountField.setAccessible(true);
+            maxRecoveryErrorCountField.setInt(stream, max);
+            break;
+          }
         }
       }
+    } catch (Exception e) {
+      LOG.info("Could not set max recovery field", e);
     }
   }
 
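Why the try/catch: after HDFS-1209 the shape of DFSClient's inner DFSOutputStream class is no longer the same across HDFS versions, so the maxRecoveryErrorCount field may simply not exist at runtime. The patched method treats the reflective write as best-effort, logging a failure instead of forcing four checked exceptions onto every caller. Below is a minimal, self-contained sketch of that pattern; Target, retryCount, and setIntFieldIfPresent are hypothetical stand-ins for illustration, not HDFS or HBase names.

import java.lang.reflect.Field;

public class ReflectiveOverrideSketch {
  // Hypothetical stand-in for a class whose private field we want to tweak.
  static class Target {
    private int retryCount = 5;
  }

  // Best-effort reflective set: succeed if the field exists in this version,
  // log and carry on if it does not. Same shape as the patched method above.
  static void setIntFieldIfPresent(Object instance, String fieldName, int value) {
    try {
      Field f = instance.getClass().getDeclaredField(fieldName);
      f.setAccessible(true);
      f.setInt(instance, value);
    } catch (Exception e) {
      System.out.println("Could not set " + fieldName + ": " + e);
    }
  }

  public static void main(String[] args) {
    Target t = new Target();
    setIntFieldIfPresent(t, "retryCount", 1);   // field exists: set to 1
    setIntFieldIfPresent(t, "noSuchField", 1);  // field missing: logged, no throw
  }
}

With this shape, callers such as TestWALReplay below can drop their try/catch; the trade-off is that when the field is absent the default value stays in effect, with only a log line to show for it.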

TestWALReplay.java

@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,12 +52,12 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mortbay.log.Log;
 
 /**
  * Test replay of edits out of a WAL split.
  */
 public class TestWALReplay {
+  public static final Log LOG = LogFactory.getLog(TestWALReplay.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
   private Path hbaseRootDir = null;
@@ -68,14 +70,14 @@ public class TestWALReplay {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("dfs.support.append", true);
-    // The below config not supported until
+    // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     conf.setInt("hbase.regionserver.flushlogentries", 1);
     TEST_UTIL.startMiniDFSCluster(3);
     TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
     Path hbaseRootDir =
       TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    Log.info("hbase.rootdir=" + hbaseRootDir);
+    LOG.info("hbase.rootdir=" + hbaseRootDir);
     conf.set(HConstants.HBASE_DIR, hbaseRootDir.toString());
   }
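The settings touched in this hunk are ordinary Hadoop Configuration entries. A minimal sketch of writing and reading them with the stock Configuration API, with the HBaseTestingUtility and mini-cluster wiring omitted; the keys and values mirror the hunk above:

import org.apache.hadoop.conf.Configuration;

public class ConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enable append support (per the patched comment: 0.20-append and CDH3b2).
    conf.setBoolean("dfs.support.append", true);
    // Give up block recovery quickly so failing tests do not linger.
    conf.setInt("dfs.client.block.recovery.retries", 2);
    // Flush the WAL after every entry.
    conf.setInt("hbase.regionserver.flushlogentries", 1);
    System.out.println(conf.getInt("dfs.client.block.recovery.retries", -1)); // prints 2
  }
}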
@@ -411,7 +413,7 @@ public class TestWALReplay {
     assertEquals(1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    Log.info("Split file=" + splits.get(0));
+    LOG.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -424,13 +426,7 @@ public class TestWALReplay {
     HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c, null);
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
-    try {
-      HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
-    } catch (Exception e) {
-      // These exceptions should never happen... make RuntimeException of them
-      // if they do.
-      throw new RuntimeException(e);
-    }
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     return wal;
   }
 }
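The remaining churn in this file replaces Jetty's static org.mortbay.log.Log with a commons-logging logger scoped to the class, matching what HBaseTestingUtility already does. A minimal sketch of that logging pattern; LoggingSketch is a hypothetical class name:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LoggingSketch {
  // One static logger per class, named after it, as TestWALReplay now declares.
  private static final Log LOG = LogFactory.getLog(LoggingSketch.class);

  public static void main(String[] args) {
    LOG.info("hbase.rootdir=" + "/hbase"); // mirrors the patched call sites
  }
}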