HBASE-5006 Move hbase 0.92RC1 on to hadoop 1.0.0RC2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1213928 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-12-13 21:01:44 +00:00
parent 1b1725188e
commit 4de54f3552
5 changed files with 156 additions and 133 deletions

pom.xml

@@ -236,9 +236,9 @@
     <url>https://repository.apache.org/content/repositories/releases/</url>
   </repository>
   <repository>
-    <id>hadoop-non-releases</id>
-    <name>Hadoop non-releases</name>
-    <url>http://people.apache.org/~rawson/repo/</url>
+    <id>apache non-releases</id>
+    <name>Apache non-releases</name>
+    <url>http://people.apache.org/~stack/m2/repository</url>
     <snapshots>
       <enabled>false</enabled>
     </snapshots>
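(The replaced repository presumably hosts the staged pre-release artifacts this commit pulls in further down, such as hadoop 1.0.0rc2 and zookeeper 3.4.1rc0, which are not published to Maven Central or the Apache releases repository.)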
@@ -503,7 +503,7 @@
       <plugin>
         <groupId>com.agilejava.docbkx</groupId>
         <artifactId>docbkx-maven-plugin</artifactId>
-        <version>2.0.11</version>
+        <version>2.0.13</version>
         <executions>
           <execution>
             <id>multipage</id>
@@ -869,6 +869,7 @@
     <commons-lang.version>2.5</commons-lang.version>
     <commons-logging.version>1.1.1</commons-logging.version>
     <commons-math.version>2.1</commons-math.version>
+    <commons-configuration.version>1.6</commons-configuration.version>
     <guava.version>r09</guava.version>
     <jackson.version>1.5.5</jackson.version>
     <jasper.version>5.5.23</jasper.version>
@@ -876,7 +877,7 @@
     <jetty.version>6.1.26</jetty.version>
     <jetty.jspapi.version>6.1.14</jetty.jspapi.version>
     <jersey.version>1.4</jersey.version>
-    <jruby.version>1.6.0</jruby.version>
+    <jruby.version>1.6.5</jruby.version>
     <junit.version>4.10-HBASE-1</junit.version>
     <log4j.version>1.2.16</log4j.version>
     <mockito-all.version>1.8.5</mockito-all.version>
@@ -884,7 +885,7 @@
     <slf4j.version>1.5.8</slf4j.version><!-- newer version available -->
     <stax-api.version>1.0.1</stax-api.version>
     <thrift.version>0.7.0</thrift.version>
-    <zookeeper.version>3.4.0</zookeeper.version>
+    <zookeeper.version>3.4.1rc0</zookeeper.version>
     <hadoop-snappy.version>0.0.1-SNAPSHOT</hadoop-snappy.version>
     <package.prefix>/usr</package.prefix>
@@ -938,6 +939,11 @@
       <artifactId>commons-cli</artifactId>
       <version>${commons-cli.version}</version>
     </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>${commons-configuration.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.github.stephenc.high-scale-lib</groupId>
       <artifactId>high-scale-lib</artifactId>
@@ -1357,16 +1363,16 @@
       </build>
     </profile>
-    <!-- profile for building against Hadoop 0.20.0 : This is the default. -->
+    <!-- profile for building against Hadoop 1.0.x: This is the default. -->
     <profile>
-      <id>hadoop-0.20</id>
+      <id>hadoop-1.0</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>
         </property>
       </activation>
       <properties>
-        <hadoop.version>0.20.205.0</hadoop.version>
+        <hadoop.version>1.0.0rc2</hadoop.version>
       </properties>
       <dependencies>
         <dependency>
@@ -1397,7 +1403,6 @@
           </exclusion>
         </exclusions>
       </dependency>
-      <!-- test deps for hadoop-0.20 profile -->
      <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-test</artifactId>

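A note on the profile change above: Maven's <name>!hadoop.profile</name> activation fires whenever the hadoop.profile property is undefined, so a bare mvn invocation now builds against Hadoop 1.0.0rc2 by default, while passing -Dhadoop.profile=<id> on the command line still selects any other Hadoop profile defined in the pom.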
src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java

@@ -707,7 +707,7 @@ public class HLog implements Syncable {
       writer.init(fs, path, conf);
       return writer;
     } catch (Exception e) {
-      IOException ie = new IOException("cannot get log writer");
+      IOException ie = new IOException("cannot get log writer", e);
       ie.initCause(e);
       throw ie;
     }
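On the exception change above: once the cause is handed to the IOException constructor, the JDK records it immediately, and a later Throwable.initCause() on that same exception throws IllegalStateException, so the idiom is normally written without the initCause call. A minimal sketch of the wrap-and-rethrow pattern in plain JDK terms, reusing the identifiers from the surrounding code:

    try {
      writer.init(fs, path, conf);
      return writer;
    } catch (Exception e) {
      // The two-argument constructor already records e as the cause,
      // so no separate initCause(e) call is needed (or allowed).
      throw new IOException("cannot get log writer", e);
    }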

src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java

@@ -140,6 +140,118 @@ public class TestHRegion extends HBaseTestCase {
   // /tmp/testtable
   //////////////////////////////////////////////////////////////////////////////
+
+  public void testSkipRecoveredEditsReplay() throws Exception {
+    String method = "testSkipRecoveredEditsReplay";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    Configuration conf = HBaseConfiguration.create();
+    initHRegion(tableName, method, conf, family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+    long maxSeqId = 1050;
+    long minSeqId = 1000;
+
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+      fs.create(recoveredEdits);
+      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+      long time = System.nanoTime();
+      WALEdit edit = new WALEdit();
+      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+          time, KeyValue.Type.Put, Bytes.toBytes(i)));
+      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+      writer.close();
+    }
+    MonitoredTask status = TaskMonitor.get().createStatus(method);
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
+    assertEquals(maxSeqId, seqId);
+    Get get = new Get(row);
+    Result result = region.get(get, null);
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+      assertEquals(1, kvs.size());
+      assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+    }
+  }
+
+  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
+    String method = "testSkipRecoveredEditsReplaySomeIgnored";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    initHRegion(tableName, method, HBaseConfiguration.create(), family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+    long maxSeqId = 1050;
+    long minSeqId = 1000;
+
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+      fs.create(recoveredEdits);
+      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+      long time = System.nanoTime();
+      WALEdit edit = new WALEdit();
+      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+          time, KeyValue.Type.Put, Bytes.toBytes(i)));
+      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+      writer.close();
+    }
+    long recoverSeqId = 1030;
+    MonitoredTask status = TaskMonitor.get().createStatus(method);
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
+    assertEquals(maxSeqId, seqId);
+    Get get = new Get(row);
+    Result result = region.get(get, null);
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+      if (i < recoverSeqId) {
+        assertEquals(0, kvs.size());
+      } else {
+        assertEquals(1, kvs.size());
+        assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+      }
+    }
+  }
+
+  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
+    String method = "testSkipRecoveredEditsReplayAllIgnored";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    initHRegion(tableName, method, HBaseConfiguration.create(), family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+    for (int i = 1000; i < 1050; i += 10) {
+      Path recoveredEdits = new Path(
+          recoveredEditsDir, String.format("%019d", i));
+      FSDataOutputStream dos= fs.create(recoveredEdits);
+      dos.writeInt(i);
+      dos.close();
+    }
+    long minSeqId = 2000;
+    Path recoveredEdits = new Path(
+        recoveredEditsDir, String.format("%019d", minSeqId-1));
+    FSDataOutputStream dos= fs.create(recoveredEdits);
+    dos.close();
+
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
+    assertEquals(minSeqId, seqId);
+  }
+
   public void testGetWhileRegionClose() throws IOException {
     Configuration hc = initSplit();
     int numRows = 100;
@@ -2828,115 +2940,6 @@ public class TestHRegion extends HBaseTestCase {
     region.get(g, null);
   }
-
-  public void testSkipRecoveredEditsReplay() throws Exception {
-    String method = "testSkipRecoveredEditsReplay";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    Configuration conf = HBaseConfiguration.create();
-    initHRegion(tableName, method, conf, family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
-    long maxSeqId = 1050;
-    long minSeqId = 1000;
-
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
-      long time = System.nanoTime();
-      WALEdit edit = new WALEdit();
-      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
-          time, KeyValue.Type.Put, Bytes.toBytes(i)));
-      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
-          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
-      writer.close();
-    }
-    MonitoredTask status = TaskMonitor.get().createStatus(method);
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
-    assertEquals(maxSeqId, seqId);
-    Get get = new Get(row);
-    Result result = region.get(get, null);
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
-      assertEquals(1, kvs.size());
-      assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
-    }
-  }
-
-  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplaySomeIgnored";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    initHRegion(tableName, method, HBaseConfiguration.create(), family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
-    long maxSeqId = 1050;
-    long minSeqId = 1000;
-
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
-      long time = System.nanoTime();
-      WALEdit edit = new WALEdit();
-      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
-          time, KeyValue.Type.Put, Bytes.toBytes(i)));
-      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
-          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
-      writer.close();
-    }
-    long recoverSeqId = 1030;
-    MonitoredTask status = TaskMonitor.get().createStatus(method);
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
-    assertEquals(maxSeqId, seqId);
-    Get get = new Get(row);
-    Result result = region.get(get, null);
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
-      if (i < recoverSeqId) {
-        assertEquals(0, kvs.size());
-      } else {
-        assertEquals(1, kvs.size());
-        assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
-      }
-    }
-  }
-
-  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplayAllIgnored";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    initHRegion(tableName, method, HBaseConfiguration.create(), family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-    for (int i = 1000; i < 1050; i += 10) {
-      Path recoveredEdits = new Path(
-          recoveredEditsDir, String.format("%019d", i));
-      FSDataOutputStream dos= fs.create(recoveredEdits);
-      dos.writeInt(i);
-      dos.close();
-    }
-    long minSeqId = 2000;
-    Path recoveredEdits = new Path(
-        recoveredEditsDir, String.format("%019d", minSeqId-1));
-    FSDataOutputStream dos= fs.create(recoveredEdits);
-    dos.close();
-
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
-    assertEquals(minSeqId, seqId);
-  }
-
   public void testIndexesScanWithOneDeletedRow() throws IOException {
     byte[] tableName = Bytes.toBytes("testIndexesScanWithOneDeletedRow");
     byte[] family = Bytes.toBytes("family");

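The second hunk removes the same three tests that the first hunk adds near the top of the file, so the net effect is a move, with one behavioral tweak: the relocated copies of the first two tests call fs.create(recoveredEdits) before HLog.createWriter(...), which materializes the recovered-edits parent directory, presumably because the Hadoop 1.0 writer path creates WAL files non-recursively and fails when the parent is missing. All three tests lean on the recovered-edits naming convention: each file is named with a 19-digit, zero-padded sequence id, so lexicographic order matches numeric order and replayRecoveredEditsIfAny can skip any file at or below the minimum sequence id it is given. A sketch of that convention (the helper name is ours, not HBase's):

    // Hypothetical helper: path of the recovered-edits file for a sequence id.
    // Zero-padding to 19 digits (enough for any positive long) makes
    // lexicographic order equal numeric order, so replay can walk the
    // directory listing in sequence-id order and skip ids <= the minimum.
    static Path recoveredEditsFile(Path recoveredEditsDir, long seqId) {
      return new Path(recoveredEditsDir, String.format("%019d", seqId));
    }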
src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java

@@ -587,8 +587,16 @@ public class TestStore extends TestCase {
         Progressable progress) throws IOException {
       return new FaultyOutputStream(super.create(f, permission,
           overwrite, bufferSize, replication, blockSize, progress), faultPos);
     }
+
+    @Override
+    public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
+        int bufferSize, short replication, long blockSize, Progressable progress)
+        throws IOException {
+      // Fake it. Call create instead. The default implementation throws an IOE
+      // that this is not supported.
+      return create(f, overwrite, bufferSize, replication, blockSize, progress);
+    }
   }

   static class FaultyOutputStream extends FSDataOutputStream {

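The override above exists because FileSystem.createNonRecursive()'s base implementation throws an IOException declaring the operation unsupported, as the in-code comment notes; with the WAL writer on the Hadoop 1.0 code path now calling it, a FileSystem test double that does not override it would fail before the fault-injection logic ever runs. Delegating to create() is enough here, since this test only cares about where writes fault, not about directory semantics.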
src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java

@@ -19,24 +19,27 @@
  */
 package org.apache.hadoop.hbase.replication;

+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;

 @Category(MediumTests.class)
 public class TestReplicationSource {
@@ -44,7 +47,7 @@ public class TestReplicationSource {
       LogFactory.getLog(TestReplicationSource.class);
   private final static HBaseTestingUtility TEST_UTIL =
       new HBaseTestingUtility();
-  private static FileSystem fs;
+  private static FileSystem FS;
   private static Path oldLogDir;
   private static Path logDir;
   private static Configuration conf = HBaseConfiguration.create();
@@ -55,11 +58,13 @@ public class TestReplicationSource {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniDFSCluster(1);
-    fs = TEST_UTIL.getDFSCluster().getFileSystem();
-    oldLogDir = new Path(fs.getHomeDirectory(),
+    FS = TEST_UTIL.getDFSCluster().getFileSystem();
+    oldLogDir = new Path(FS.getHomeDirectory(),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    logDir = new Path(fs.getHomeDirectory(),
+    if (FS.exists(oldLogDir)) FS.delete(oldLogDir, true);
+    logDir = new Path(FS.getHomeDirectory(),
         HConstants.HREGION_LOGDIR_NAME);
+    if (FS.exists(logDir)) FS.delete(logDir, true);
   }

   /**
@@ -71,7 +76,9 @@ public class TestReplicationSource {
   @Test
   public void testLogMoving() throws Exception{
     Path logPath = new Path(logDir, "log");
-    HLog.Writer writer = HLog.createWriter(fs, logPath, conf);
+    if (!FS.exists(logDir)) FS.mkdirs(logDir);
+    if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
+    HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
     for(int i = 0; i < 3; i++) {
       byte[] b = Bytes.toBytes(Integer.toString(i));
       KeyValue kv = new KeyValue(b,b,b);
@@ -83,12 +90,12 @@ public class TestReplicationSource {
     }
     writer.close();

-    HLog.Reader reader = HLog.getReader(fs, logPath, conf);
+    HLog.Reader reader = HLog.getReader(FS, logPath, conf);
     HLog.Entry entry = reader.next();
     assertNotNull(entry);

     Path oldLogPath = new Path(oldLogDir, "log");
-    fs.rename(logPath, oldLogPath);
+    FS.rename(logPath, oldLogPath);

     entry = reader.next();
     assertNotNull(entry);
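Taken together, the changes in this file make testLogMoving hermetic under the new default Hadoop profile: the class-level setup deletes any log directories left over from earlier runs, the test creates logDir and oldLogDir explicitly before asking for a writer (presumably because the non-recursive create path does not make missing parent directories), and the static fs field is renamed FS, matching the upper-case style the class already uses for TEST_UTIL.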