HBASE-2255 take trunk back to hadoop 0.20

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@926397 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Ryan Rawson 2010-03-22 23:36:37 +00:00
parent 0d1bbc6c51
commit e2725f70fb
15 changed files with 102 additions and 148 deletions

View File

@ -14,6 +14,7 @@ Release 0.21.0 - Unreleased
HBASE-1728 Column family scoping and cluster identification
HBASE-2099 Move build to Maven (Paul Smith via Stack)
HBASE-2260 Remove all traces of Ant and Ivy (Lars Francke via Stack)
HBASE-2255 take trunk back to hadoop 0.20
BUG FIXES
HBASE-1791 Timeout in IndexRecordWriter (Bradford Stephens via Andrew

View File

@ -39,15 +39,7 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-test</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-test</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapred-test</artifactId>
<artifactId>hadoop-test</artifactId>
</dependency>
</dependencies>
</project>

View File

@ -19,6 +19,7 @@
<jersey.version>1.1.4.1</jersey.version>
<json.version>20090211</json.version>
<hsqldb.version>1.8.0.10</hsqldb.version>
<commons-httpclient.version>3.0.1</commons-httpclient.version>
</properties>
<build>
@ -45,11 +46,9 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-test</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-test</artifactId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
@ -71,6 +70,11 @@
<artifactId>jersey-server</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons-httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>

View File

@ -37,11 +37,9 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-test</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-test</artifactId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@ -179,63 +179,50 @@
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapred</artifactId>
<version>${hadoop-mapred.version}</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
<exclusion>
<groupId>com.thoughtworks.paranamer</groupId>
<artifactId>paranamer</artifactId>
</exclusion>
</exclusions>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapred-test</artifactId>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop-hdfs.version}</version>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-test</artifactId>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
<version>${jasper.version}</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
<version>${jasper.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop-core.version}</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
<exclusion>
<groupId>com.thoughtworks.paranamer</groupId>
<artifactId>paranamer-ant</artifactId>
</exclusion>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
</exclusions>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-test</artifactId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.thrift</groupId>
@ -259,17 +246,35 @@
<version>${commons-lang.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.jruby</groupId>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>${commons-cli.version}</version>
</dependency>
<!--
TODO mention to Hbase team to tell the avro team about this problem,
hbase-core depends on hadoop, which then:
[INFO] | \- org.apache.hadoop:avro:jar:1.2.0:compile
[INFO] | +- org.slf4j:slf4j-simple:jar:1.5.8:compile
[INFO] | | \- org.slf4j:slf4j-api:jar:1.5.2:compile
see: https://forum.hibernate.org/viewtopic.php?p=2400801 and http://old.nabble.com/org.slf4j.impl.StaticLoggerBinder.SINGLETON-td20987705.html
upgrading to 1.5.6 will fix this
-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
<version>${jruby.version}</version>
</dependency>

View File

@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import java.io.IOException;
@ -59,9 +58,7 @@ public class CopyTable {
if (!doCommandLine(args)) {
return null;
}
Cluster mrCluster = new Cluster(conf);
Job job = Job.getInstance(mrCluster, conf);
job.setJobName(NAME + "_" + tableName);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJarByClass(CopyTable.class);
Scan scan = new Scan();
if (startTime != 0) {

View File

@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
@ -79,8 +78,7 @@ public class Export {
throws IOException {
String tableName = args[0];
Path outputDir = new Path(args[1]);
Cluster mrCluster = new Cluster(conf);
Job job = Job.getInstance(mrCluster, conf);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJobName(NAME + "_" + tableName);
job.setJarByClass(Exporter.class);
// TODO: Allow passing filter and subset of rows/columns.

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
@ -86,10 +85,7 @@ public class Import {
throws IOException {
String tableName = args[0];
Path inputDir = new Path(args[1]);
Cluster mrCluster = new Cluster(conf);
Job job = Job.getInstance(mrCluster, conf);
job.setJobName(NAME + "_" + tableName);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJarByClass(Importer.class);
FileInputFormat.setInputPaths(job, inputDir);
job.setInputFormatClass(SequenceFileInputFormat.class);

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
@ -86,9 +85,7 @@ public class RowCounter {
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
String tableName = args[0];
Cluster mrCluster = new Cluster(conf);
Job job = Job.getInstance(mrCluster, conf);
job.setJobName(NAME + "_" + tableName);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJarByClass(RowCounter.class);
// Columns are space delimited
StringBuilder sb = new StringBuilder();

View File

@ -1029,8 +1029,7 @@ public class HLog implements HConstants, Syncable {
* @throws IOException
*/
public static List<Path> splitLog(final Path rootDir, final Path srcDir,
Path oldLogDir, final FileSystem fs, final Configuration conf)
throws IOException {
Path oldLogDir, final FileSystem fs, final Configuration conf) throws IOException {
long millis = System.currentTimeMillis();
List<Path> splits = null;

View File

@ -66,9 +66,8 @@ public class SequenceFileLogWriter implements HLog.Writer {
@Override
public void sync() throws IOException {
this.writer.sync();
if (this.writer_out != null) {
this.writer_out.hflush();
}
this.writer.syncFs();
}
}

View File

@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.After;
@ -354,8 +353,7 @@ public class TestTableInputFormatScan {
FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
LOG.info("Started " + job.getJobName());
job.waitForCompletion(true);
LOG.info("Job status: " + job.getStatus());
assertTrue(job.getStatus().getState() == JobStatus.State.SUCCEEDED);
assertTrue(job.isComplete());
LOG.info("After map/reduce completion - job " + jobName);
}
}

View File

@ -50,7 +50,8 @@ public class TestStoreReconstruction {
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception { }
public static void setUpBeforeClass() throws Exception {
}
/**
* @throws java.lang.Exception
@ -104,8 +105,7 @@ public class TestStoreReconstruction {
List<KeyValue> result = new ArrayList<KeyValue>();
// Empty set to get all columns
NavigableSet<byte[]> qualifiers =
new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
final byte[] tableName = Bytes.toBytes(TABLE);
final byte[] rowName = tableName;
@ -133,12 +133,15 @@ public class TestStoreReconstruction {
System.currentTimeMillis());
log.sync();
// TODO dont close the file here.
log.close();
List<Path> splits =
HLog.splitLog(new Path(conf.get(HConstants.HBASE_DIR)),
this.dir, oldLogDir, cluster.getFileSystem(), conf);
// Split should generate only 1 file since there's only 1 region
assertTrue(splits.size() == 1);
assertEquals(1, splits.size());
// Make sure the file exists
assertTrue(cluster.getFileSystem().exists(splits.get(0)));
@ -150,6 +153,6 @@ public class TestStoreReconstruction {
Get get = new Get(rowName);
store.get(get, qualifiers, result);
// Make sure we only see the good edits
assertEquals(result.size(), TOTAL_EDITS);
assertEquals(TOTAL_EDITS, result.size());
}
}

View File

@ -120,7 +120,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
* Test new HDFS-265 sync.
* @throws Exception
*/
public void testSync() throws Exception {
public void Broken_testSync() throws Exception {
byte [] bytes = Bytes.toBytes(getName());
// First verify that using streams all works.
Path p = new Path(this.dir, getName() + ".fsdos");

55
pom.xml
View File

@ -158,12 +158,14 @@
<compileSource>1.6</compileSource>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<hadoop.version>0.20.2-with-200-826</hadoop.version>
<log4j.version>1.2.15</log4j.version>
<jetty.version>6.1.14</jetty.version>
<jasper.version>5.5.12</jasper.version>
<commons-lang.version>2.4</commons-lang.version>
<commons-math.version>2.0</commons-math.version>
<hadoop-core.version>0.21.0-SNAPSHOT</hadoop-core.version>
<hadoop-hdfs.version>0.21.0-SNAPSHOT</hadoop-hdfs.version>
<hadoop-mapred.version>0.21.0-SNAPSHOT</hadoop-mapred.version>
<commons-cli.version>1.2</commons-cli.version>
<!-- TODO specify external repositories - Note the following 2 resources are not downloadable from a public repository, you'll need to place these manually by using 'mvn install:file' or use something like Nexus as a repository manager -->
<zookeeper.version>3.2.2</zookeeper.version>
<thrift.version>0.2.0</thrift.version>
@ -173,28 +175,6 @@
</properties>
<repositories>
<repository>
<id>asf-releases</id>
<name>Apache Public Releases</name>
<url>https://repository.apache.org/content/repositories/releases/</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
<releases>
<enabled>true</enabled>
</releases>
</repository>
<repository>
<id>asf-snapshots</id>
<name>Apache Public Snapshots</name>
<url>https://repository.apache.org/content/repositories/snapshots/</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
<releases>
<enabled>true</enabled>
</releases>
</repository>
<repository>
<id>java.net</id>
<name>Java.Net</name>
@ -209,7 +189,7 @@
<repository>
<id>googlecode</id>
<name>Google Code</name>
<url>http://google-maven-repository.googlecode.com/svn/repository</url>
<url>http://google-maven-repository.googlecode.com/svn/repository/</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@ -228,11 +208,10 @@
<enabled>true</enabled>
</releases>
</repository>
<!-- TODO replace this with a 'proper' repository, even if it's just @stacks version of this. -->
<repository>
<id>misc</id>
<name>Miscellaneous (Stuff for Zookeeper and Thrift)</name>
<url>http://people.apache.org/~psmith/hbase/repo</url>
<id>temp-hadoop</id>
<name>Hadoop 0.20.1/2 packaging, thrift, zk</name>
<url>http://people.apache.org/~rawson/repo/</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@ -329,20 +308,8 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-test</artifactId>
<version>${hadoop-core.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-test</artifactId>
<version>${hadoop-hdfs.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapred-test</artifactId>
<version>${hadoop-mapred.version}</version>
<artifactId>hadoop-test</artifactId>
<version>${hadoop.version}</version>
<scope>test</scope>
</dependency>
<dependency>