HBASE-4551 Fix pom and some test cases to compile and run against Hadoop 0.23

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1183154 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2011-10-13 22:51:15 +00:00
parent ef5476f5ca
commit 4b902528e7
7 changed files with 10 additions and 37 deletions

CHANGES.txt

@@ -358,6 +358,8 @@ Release 0.92.0 - Unreleased
    HBASE-4078  Validate store files after flush/compaction
    HBASE-3417  CacheOnWrite is using the temporary output path for block
                names, need to use a more consistent block naming scheme (jgray)
+   HBASE-4551  Fix pom and some test cases to compile and run against
+               Hadoop 0.23 (todd)
 
   TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected

pom.xml

@@ -1525,14 +1525,16 @@
       <!-- test deps for hadoop-0.23 profile -->
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-common-test</artifactId>
+        <artifactId>hadoop-common</artifactId>
         <version>${hadoop.version}</version>
+        <type>test-jar</type>
         <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs-test</artifactId>
+        <artifactId>hadoop-hdfs</artifactId>
         <version>${hadoop.version}</version>
+        <type>test-jar</type>
         <scope>test</scope>
       </dependency>
       <dependency>

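The separate hadoop-common-test and hadoop-hdfs-test artifacts went away with the 0.23 mavenization; the test classes now ship as test-jar attachments of the main artifacts, which is what the added <type>test-jar</type> elements select. For context, a minimal sketch of the producer side of that convention, standard maven-jar-plugin usage rather than anything in this commit:

    <!-- Sketch: how a module publishes the secondary "tests" jar that a
         consumer then selects with <type>test-jar</type>. -->
    <plugin>
      <groupId>org.apache.maven.plugins</groupId>
      <artifactId>maven-jar-plugin</artifactId>
      <executions>
        <execution>
          <goals>
            <goal>test-jar</goal> <!-- attaches <artifactId>-<version>-tests.jar -->
          </goals>
        </execution>
      </executions>
    </plugin>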
HBaseTestingUtility.java

@@ -1461,35 +1461,6 @@ public class HBaseTestingUtility {
     return user;
   }
 
-  /**
-   * Set soft and hard limits in namenode.
-   * You'll get a NPE if you call before you've started a minidfscluster.
-   * @param soft Soft limit
-   * @param hard Hard limit
-   * @throws NoSuchFieldException
-   * @throws SecurityException
-   * @throws IllegalAccessException
-   * @throws IllegalArgumentException
-   */
-  public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
-      throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
-    // TODO: If 0.20 hadoop do one thing, if 0.21 hadoop do another.
-    // Not available in 0.20 hdfs.  Use reflection to make it happen.
-    // private NameNode nameNode;
-    Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
-    field.setAccessible(true);
-    NameNode nn = (NameNode)field.get(this.dfsCluster);
-    field = nn.getClass().getDeclaredField("namesystem");
-    field.setAccessible(true);
-    FSNamesystem namesystem = (FSNamesystem)field.get(nn);
-    field = namesystem.getClass().getDeclaredField("leaseManager");
-    field.setAccessible(true);
-    LeaseManager lm = (LeaseManager)field.get(namesystem);
-    lm.setLeasePeriod(100, 50000);
-  }
-
   /**
    * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append its hard-coded to 5 and
    * makes tests linger.  Here is the exception you'll see:

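The deleted helper reached the LeaseManager by reflecting through three private fields (nameNode, namesystem, leaseManager), and it also ignored its soft/hard parameters, hardcoding lm.setLeasePeriod(100, 50000). Stripped of the HDFS specifics, the pattern it relied on is plain java.lang.reflect; a minimal sketch, with the field name purely illustrative:

    import java.lang.reflect.Field;

    public final class ReflectionSketch {
      // Read a private instance field by name. getDeclaredField() searches only
      // the exact runtime class, so a rename or move in a new Hadoop version
      // throws NoSuchFieldException -- presumably why the helper was dropped
      // rather than ported to the reorganized 0.23 internals.
      static Object readPrivateField(Object target, String name) throws Exception {
        Field f = target.getClass().getDeclaredField(name); // locate the declared field
        f.setAccessible(true);                              // lift the private access check
        return f.get(target);                               // return its current value
      }
    }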
TestWALObserver.java

@@ -100,7 +100,6 @@ public class TestWALObserver {
     conf.setInt("dfs.client.block.recovery.retries", 2);
 
     TEST_UTIL.startMiniCluster(1);
-    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
     Path hbaseRootDir =
         TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
     LOG.info("hbase.rootdir=" + hbaseRootDir);

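The surviving setup line qualifies the bare /hbase path against the mini cluster's filesystem so that hbase.rootdir carries a full hdfs://host:port authority. In isolation, the call is just the standard FileSystem API; a small sketch with fs standing in for the mini cluster's filesystem:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class QualifySketch {
      // makeQualified() resolves a bare path against the filesystem's URI and
      // working directory, e.g. /hbase -> hdfs://localhost:port/hbase, so the
      // value handed to hbase.rootdir is unambiguous.
      static Path hbaseRoot(FileSystem fs) {
        return fs.makeQualified(new Path("/hbase"));
      }
    }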
TestHLog.java

@@ -46,8 +46,9 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
@@ -371,7 +372,8 @@ public class TestHLog {
 
     // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
     try {
-      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
       cluster.shutdown();
       try {
         // wal.writer.close() will throw an exception,

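The replacement block stops going through cluster.getNameNode(), whose surrounding classes moved in 0.23, and drives safe mode through the public client API instead. As a standalone sketch, assuming a running MiniDFSCluster (note that later Hadoop lines renamed FSConstants to HdfsConstants, so the import is version-sensitive):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.FSConstants;

    public final class SafeModeSketch {
      // Toggle HDFS safe mode through DistributedFileSystem, the route the
      // patched test takes; it avoids touching NameNode internals directly.
      static void bounceSafeMode(MiniDFSCluster cluster) throws IOException {
        DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
        dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); // reject writes
        dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE); // resume service
      }
    }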
TestHLogSplit.java

@@ -140,8 +140,6 @@ public class TestHLogSplit {
     regions = new ArrayList<String>();
     Collections.addAll(regions, "bbb", "ccc");
     InstrumentedSequenceFileLogWriter.activateFailure = false;
-    // Set the soft lease for hdfs to be down from default of 5 minutes or so.
-    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 50000);
   }
 
   @After

TestWALReplay.java

@@ -79,7 +79,6 @@ public class TestWALReplay {
     // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     TEST_UTIL.startMiniDFSCluster(3);
-    TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
     Path hbaseRootDir =
         TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
     LOG.info("hbase.rootdir=" + hbaseRootDir);
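This file, TestWALObserver, and TestHLogSplit all lose their setNameNodeNameSystemLeasePeriod calls along with the reflective helper; the retained dfs.client.block.recovery.retries setting keeps block recovery from dragging tests out. Where shortened leases are still wanted, later MiniDFSCluster versions expose the tuning directly; whether the 0.23 snapshot targeted here already does is an assumption, so treat this as a sketch of the direction rather than the fix this commit applies:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class LeaseSketch {
      // Hedged: MiniDFSCluster#setLeasePeriod(soft, hard) exists on later
      // Hadoop lines and replaces the reflective field-walking wholesale.
      static void shortenLeases(MiniDFSCluster cluster) {
        cluster.setLeasePeriod(100L, 50000L); // soft=100ms, hard=50s
      }
    }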