HBASE-4221 Changes necessary to build and run against Hadoop 0.23

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1160919 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2011-08-23 22:47:53 +00:00
parent 27096be10a
commit df360dccde
22 changed files with 278 additions and 60 deletions


@@ -451,6 +451,8 @@ Release 0.91.0 - Unreleased
HBASE-3857 Change the HFile Format (Mikhail & Liyin)
HBASE-4114 Metrics for HFile HDFS block locality (Ming Ma)
HBASE-4176 Exposing HBase Filters to the Thrift API (Anirudh Todi)
+HBASE-4221 Changes necessary to build and run against Hadoop 0.23
+(todd)
Release 0.90.5 - Unreleased

pom.xml

@@ -633,11 +633,6 @@
<commons-logging.version>1.1.1</commons-logging.version>
<commons-math.version>2.1</commons-math.version>
<guava.version>r09</guava.version>
-<!--The below was made by patching branch-0.20-append
-at revision 1034499 with this hdfs-895 patch:
-https://issues.apache.org/jira/secure/attachment/12459473/hdfs-895-branch-20-append.txt
--->
-<hadoop.version>0.20-append-r1057313</hadoop.version>
<jackson.version>1.5.5</jackson.version>
<jasper.version>5.5.23</jasper.version>
<jaxb-api.version>2.1</jaxb-api.version>
@@ -736,33 +731,6 @@
</exclusion>
</exclusions>
</dependency>
-<dependency>
-<groupId>org.apache.hadoop</groupId>
-<artifactId>hadoop-core</artifactId>
-<version>${hadoop.version}</version>
-<exclusions>
-<exclusion>
-<groupId>hsqldb</groupId>
-<artifactId>hsqldb</artifactId>
-</exclusion>
-<exclusion>
-<groupId>net.sf.kosmosfs</groupId>
-<artifactId>kfs</artifactId>
-</exclusion>
-<exclusion>
-<groupId>org.eclipse.jdt</groupId>
-<artifactId>core</artifactId>
-</exclusion>
-<exclusion>
-<groupId>net.java.dev.jets3t</groupId>
-<artifactId>jets3t</artifactId>
-</exclusion>
-<exclusion>
-<groupId>oro</groupId>
-<artifactId>oro</artifactId>
-</exclusion>
-</exclusions>
-</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
@@ -968,12 +936,6 @@
<version>${commons-math.version}</version>
<scope>test</scope>
</dependency>
-<dependency>
-<groupId>org.apache.hadoop</groupId>
-<artifactId>hadoop-test</artifactId>
-<version>${hadoop.version}</version>
-<scope>test</scope>
-</dependency>
</dependencies>
<!--
@@ -1138,6 +1100,212 @@
</plugins>
</build>
</profile>
+<!-- profile for building against Hadoop 0.20.0 -->
+<profile>
+<id>hadoop-0.20</id>
+<activation>
+<property>
+<name>!hadoop23</name>
+</property>
+</activation>
+<properties>
+<!--The below was made by patching branch-0.20-append
+at revision 1034499 with this hdfs-895 patch:
+https://issues.apache.org/jira/secure/attachment/12459473/hdfs-895-branch-20-append.txt
+-->
+<hadoop.version>0.20-append-r1057313</hadoop.version>
+</properties>
+<dependencies>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-core</artifactId>
+<version>${hadoop.version}</version>
+<exclusions>
+<exclusion>
+<groupId>hsqldb</groupId>
+<artifactId>hsqldb</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.sf.kosmosfs</groupId>
+<artifactId>kfs</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.eclipse.jdt</groupId>
+<artifactId>core</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.java.dev.jets3t</groupId>
+<artifactId>jets3t</artifactId>
+</exclusion>
+<exclusion>
+<groupId>oro</groupId>
+<artifactId>oro</artifactId>
+</exclusion>
+</exclusions>
+</dependency>
+<!-- test deps for hadoop-0.20 profile -->
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-test</artifactId>
+<version>${hadoop.version}</version>
+<scope>test</scope>
+</dependency>
+</dependencies>
+</profile>
+<!--
+profile for building against Hadoop 0.23.0. Activate using:
+mvn -Dhadoop23
+-->
+<profile>
+<id>hadoop-0.23</id>
+<activation>
+<property>
+<name>hadoop23</name>
+</property>
+</activation>
+<properties>
+<hadoop.version>0.23.0-SNAPSHOT</hadoop.version>
+</properties>
+<dependencies>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-common</artifactId>
+<version>${hadoop.version}</version>
+<exclusions>
+<!--Needs more work, tightening-->
+<exclusion>
+<groupId>hsqldb</groupId>
+<artifactId>hsqldb</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.sf.kosmosfs</groupId>
+<artifactId>kfs</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.eclipse.jdt</groupId>
+<artifactId>core</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.java.dev.jets3t</groupId>
+<artifactId>jets3t</artifactId>
+</exclusion>
+<exclusion>
+<groupId>oro</groupId>
+<artifactId>oro</artifactId>
+</exclusion>
+<exclusion>
+<groupId>jdiff</groupId>
+<artifactId>jdiff</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.apache.lucene</groupId>
+<artifactId>lucene-core</artifactId>
+</exclusion>
+</exclusions>
+</dependency>
+<!-- need the annotations to avoid compiler issues. This is
+really an upstream issue with Hadoop -->
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-annotations</artifactId>
+<version>${hadoop.version}</version>
+<scope>provided</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-hdfs</artifactId>
+<version>${hadoop.version}</version>
+<exclusions>
+<!--Needs more work, tightening-->
+<exclusion>
+<groupId>hsqldb</groupId>
+<artifactId>hsqldb</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.sf.kosmosfs</groupId>
+<artifactId>kfs</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.eclipse.jdt</groupId>
+<artifactId>core</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.java.dev.jets3t</groupId>
+<artifactId>jets3t</artifactId>
+</exclusion>
+<exclusion>
+<groupId>oro</groupId>
+<artifactId>oro</artifactId>
+</exclusion>
+<exclusion>
+<groupId>jdiff</groupId>
+<artifactId>jdiff</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.apache.lucene</groupId>
+<artifactId>lucene-core</artifactId>
+</exclusion>
+</exclusions>
+</dependency>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-mapred</artifactId>
+<version>${hadoop.version}</version>
+<exclusions>
+<!--Needs more work, tightening-->
+<exclusion>
+<groupId>hsqldb</groupId>
+<artifactId>hsqldb</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.sf.kosmosfs</groupId>
+<artifactId>kfs</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.eclipse.jdt</groupId>
+<artifactId>core</artifactId>
+</exclusion>
+<exclusion>
+<groupId>net.java.dev.jets3t</groupId>
+<artifactId>jets3t</artifactId>
+</exclusion>
+<exclusion>
+<groupId>oro</groupId>
+<artifactId>oro</artifactId>
+</exclusion>
+<exclusion>
+<groupId>jdiff</groupId>
+<artifactId>jdiff</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.apache.lucene</groupId>
+<artifactId>lucene-core</artifactId>
+</exclusion>
+</exclusions>
+</dependency>
+<!-- test deps for hadoop-0.23 profile -->
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-common-test</artifactId>
+<version>${hadoop.version}</version>
+<scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-hdfs-test</artifactId>
+<version>${hadoop.version}</version>
+<scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.hadoop</groupId>
+<artifactId>hadoop-mapred-test</artifactId>
+<version>${hadoop.version}</version>
+<scope>test</scope>
+</dependency>
+</dependencies>
+</profile>
</profiles>
<!-- See http://jira.codehaus.org/browse/MSITE-443 why the settings need to be here and not in pluginManagement. -->
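Note on activation (a summary, not part of the commit): the hadoop-0.20 profile applies whenever the hadoop23 property is absent, so a plain mvn invocation keeps the previous 0.20-append-r1057313 dependencies, while passing -Dhadoop23 (for example mvn -Dhadoop23 clean install; the goals here are only illustrative) switches every Hadoop artifact to the 0.23.0-SNAPSHOT coordinates declared in the hadoop-0.23 profile.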


@@ -195,7 +195,7 @@ public class HConnectionManager {
* @param stopProxy
* Shuts down all the proxy's put up to cluster members including to
* cluster HMaster. Calls
-* {@link HBaseRPC#stopProxy(org.apache.hadoop.ipc.VersionedProtocol)}
+* {@link HBaseRPC#stopProxy(org.apache.hadoop.hbase.ipc.VersionedProtocol)}
* .
*/
public static void deleteConnection(Configuration conf, boolean stopProxy) {
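Only the {@link} target changes here, pointing the javadoc at HBase's new in-tree copy of VersionedProtocol. For context, a minimal caller sketch (hypothetical usage, assuming only the signature shown above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnectionManager;

// hypothetical caller, not part of this commit
public class ConnectionCleanupExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // ... use tables/connections keyed by this configuration ...
    // true additionally stops the RPC proxies put up to the cluster
    HConnectionManager.deleteConnection(conf, true);
  }
}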


@@ -21,7 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
/**
* This abstract class provides default implementation of an Endpoint.


@@ -19,7 +19,7 @@
*/
package org.apache.hadoop.hbase.ipc;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
/**
* All custom RPC protocols to be exported by Coprocessors must extend this interface.
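Any endpoint published by a coprocessor therefore now inherits the HBase copy of VersionedProtocol through this interface. A sketch of such a protocol (the name and method are hypothetical, not part of this commit):

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;

// hypothetical endpoint protocol; name and method are illustrative only
public interface RowCountProtocol extends CoprocessorProtocol {
  // counts the rows of the region this endpoint is invoked against
  long getRowCount() throws IOException;
}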


@@ -57,7 +57,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;


@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;


@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.ipc;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;


@@ -63,7 +63,7 @@ import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;


@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
/**
* Clients interact with the HMasterInterface to gain access to meta-level


@@ -24,7 +24,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
/**
* The Master publishes this Interface for RegionServers to register themselves


@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
/**
* Clients interact with HRegionServers using a handle to the HRegionInterface.


@@ -24,7 +24,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import javax.net.SocketFactory;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;


@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.ipc;
import com.google.common.base.Function;
import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import java.io.IOException;
import java.net.InetSocketAddress;


@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hbase.ipc;
+import java.io.IOException;
+/**
+* Superclass of all protocols that use Hadoop RPC.
+* Subclasses of this interface are also supposed to have
+* a static final long versionID field.
+*
+* This has been copied from the Hadoop IPC project so that
+* we can run on multiple different versions of Hadoop.
+*/
+public interface VersionedProtocol {
+/**
+* Return protocol version corresponding to protocol interface.
+* @param protocol The classname of the protocol interface
+* @param clientVersion The version of the protocol that the client speaks
+* @return the version that the server will speak
+* @throws IOException if any IO error occurs
+*/
+@Deprecated
+public long getProtocolVersion(String protocol,
+long clientVersion) throws IOException;
+}
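The versionID convention mentioned in the class comment, shown on a hypothetical subinterface (illustrative only, not part of this commit):

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.VersionedProtocol;

// hypothetical protocol demonstrating the documented convention
public interface EchoProtocol extends VersionedProtocol {
  // checked against the client's requested version via getProtocolVersion
  static final long versionID = 1L;

  String echo(String message) throws IOException;
}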


@@ -38,7 +38,7 @@ import org.apache.commons.logging.*;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Objects;
import org.apache.hadoop.io.*;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.conf.*;


@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.ZooKeeper;
@@ -1443,7 +1444,11 @@ public class HBaseTestingUtility {
field = nn.getClass().getDeclaredField("namesystem");
field.setAccessible(true);
FSNamesystem namesystem = (FSNamesystem)field.get(nn);
-namesystem.leaseManager.setLeasePeriod(100, 50000);
+field = namesystem.getClass().getDeclaredField("leaseManager");
+field.setAccessible(true);
+LeaseManager lm = (LeaseManager)field.get(namesystem);
+lm.setLeasePeriod(100, 50000);
}
/**
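The leaseManager field is now read reflectively as well, since it is not accessible across all supported Hadoop versions. The same pattern, factored into a hypothetical helper for clarity (not part of this commit):

import java.lang.reflect.Field;

// hypothetical helper illustrating the private-field-access pattern above
static Object readDeclaredField(Object target, String name) throws Exception {
  Field f = target.getClass().getDeclaredField(name);
  f.setAccessible(true); // bypass the field's declared visibility
  return f.get(target);
}

With it, the body above would reduce to calling setLeasePeriod on (LeaseManager) readDeclaredField(namesystem, "leaseManager").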


@@ -31,7 +31,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;


@@ -237,8 +237,7 @@ public class TestHFileOutputFormat {
* metadata used by time-restricted scans.
*/
@Test
-public void test_TIMERANGE()
-throws IOException, InterruptedException {
+public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
@@ -249,8 +248,7 @@
// build a record writer using HFileOutputFormat
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
-context = new TaskAttemptContext(job.getConfiguration(),
-new TaskAttemptID());
+context = getTestTaskAttemptContext(job);
HFileOutputFormat hof = new HFileOutputFormat();
writer = hof.getRecordWriter(context);
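The direct TaskAttemptContext constructor only compiles against Hadoop 0.20; in 0.23 TaskAttemptContext became an interface (with TaskAttemptContextImpl as the concrete class), so the test now delegates to a getTestTaskAttemptContext helper whose body is not shown in this excerpt. Against 0.20 alone it could be as simple as this sketch (hypothetical; a real helper must also cover the 0.23 API):

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;

// hypothetical 0.20-only stand-in; on 0.23 new TaskAttemptContext(...)
// no longer compiles, so version-specific construction is needed
private TaskAttemptContext getTestTaskAttemptContext(Job job) {
  return new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID());
}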


@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;


@@ -64,7 +64,8 @@ public class TestHLog {
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
+.getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
}
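The compile-time FSNamesystem.LOG reference is replaced with a by-name lookup, presumably so the test compiles against Hadoop versions where that field cannot be referenced directly; the identical substitution appears in TestLogRolling below. The pattern in isolation (a sketch, assuming commons-logging backed by log4j as in these tests):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;

// log4j loggers are keyed by name, so no reference to the
// FSNamesystem class itself is needed to adjust its log level
Log fsLog = LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem");
((Log4JLogger) fsLog).getLogger().setLevel(Level.ALL);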


@@ -74,7 +74,8 @@ public class TestLogRolling {
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
+.getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)HRegionServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)HRegion.LOG).getLogger().setLevel(Level.ALL);