HBASE-5006 Move hbase 0.92RC1 on to hadoop 1.0.0RC2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1213928 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack
Date:   2011-12-13 21:01:44 +00:00
Parent: 1b1725188e
Commit: 4de54f3552
5 changed files with 156 additions and 133 deletions

pom.xml

@@ -236,9 +236,9 @@
       <url>https://repository.apache.org/content/repositories/releases/</url>
     </repository>
     <repository>
-      <id>hadoop-non-releases</id>
-      <name>Hadoop non-releases</name>
-      <url>http://people.apache.org/~rawson/repo/</url>
+      <id>apache non-releases</id>
+      <name>Apache non-releases</name>
+      <url>http://people.apache.org/~stack/m2/repository</url>
       <snapshots>
         <enabled>false</enabled>
       </snapshots>
@@ -503,7 +503,7 @@
       <plugin>
         <groupId>com.agilejava.docbkx</groupId>
         <artifactId>docbkx-maven-plugin</artifactId>
-        <version>2.0.11</version>
+        <version>2.0.13</version>
         <executions>
           <execution>
             <id>multipage</id>
@@ -869,6 +869,7 @@
     <commons-lang.version>2.5</commons-lang.version>
     <commons-logging.version>1.1.1</commons-logging.version>
     <commons-math.version>2.1</commons-math.version>
+    <commons-configuration.version>1.6</commons-configuration.version>
     <guava.version>r09</guava.version>
     <jackson.version>1.5.5</jackson.version>
     <jasper.version>5.5.23</jasper.version>
@@ -876,7 +877,7 @@
     <jetty.version>6.1.26</jetty.version>
     <jetty.jspapi.version>6.1.14</jetty.jspapi.version>
     <jersey.version>1.4</jersey.version>
-    <jruby.version>1.6.0</jruby.version>
+    <jruby.version>1.6.5</jruby.version>
     <junit.version>4.10-HBASE-1</junit.version>
     <log4j.version>1.2.16</log4j.version>
     <mockito-all.version>1.8.5</mockito-all.version>
@@ -884,7 +885,7 @@
     <slf4j.version>1.5.8</slf4j.version><!-- newer version available -->
     <stax-api.version>1.0.1</stax-api.version>
     <thrift.version>0.7.0</thrift.version>
-    <zookeeper.version>3.4.0</zookeeper.version>
+    <zookeeper.version>3.4.1rc0</zookeeper.version>
     <hadoop-snappy.version>0.0.1-SNAPSHOT</hadoop-snappy.version>
     <package.prefix>/usr</package.prefix>
@@ -938,6 +939,11 @@
       <artifactId>commons-cli</artifactId>
       <version>${commons-cli.version}</version>
     </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>${commons-configuration.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.github.stephenc.high-scale-lib</groupId>
       <artifactId>high-scale-lib</artifactId>
@@ -1357,16 +1363,16 @@
         </build>
       </profile>
-    <!-- profile for building against Hadoop 0.20.0 : This is the default. -->
+    <!-- profile for building against Hadoop 1.0.x: This is the default. -->
     <profile>
-      <id>hadoop-0.20</id>
+      <id>hadoop-1.0</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>
         </property>
       </activation>
       <properties>
-        <hadoop.version>0.20.205.0</hadoop.version>
+        <hadoop.version>1.0.0rc2</hadoop.version>
       </properties>
       <dependencies>
         <dependency>
@@ -1397,7 +1403,6 @@
           </exclusion>
         </exclusions>
       </dependency>
-      <!-- test deps for hadoop-0.20 profile -->
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-test</artifactId>
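
Note on the profile hunk above: Maven's <name>!hadoop.profile</name> activation makes the hadoop-1.0 profile active exactly when no hadoop.profile property is supplied, so a plain "mvn clean install" now builds against Hadoop 1.0.0rc2 by default, while passing -Dhadoop.profile=<value> on the command line deactivates it and selects one of the pom's other Hadoop profiles instead (at the time the pom carried a sibling profile for Hadoop 0.23 keyed on that property).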

src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java

@@ -707,7 +707,7 @@ public class HLog implements Syncable {
       writer.init(fs, path, conf);
       return writer;
     } catch (Exception e) {
-      IOException ie = new IOException("cannot get log writer");
+      IOException ie = new IOException("cannot get log writer", e);
       ie.initCause(e);
       throw ie;
     }
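
The one-line change above threads the underlying exception into the wrapper at construction time, so the root cause travels with the IOException wherever it is rethrown or logged. A standalone sketch of Java's exception-chaining rules (demo class and names are hypothetical, not HBase code); note that Throwable allows the cause to be set only once, so initCause after a constructor-supplied cause is rejected:

import java.io.IOException;

public class ChainingDemo {
  public static void main(String[] args) {
    Exception root = new RuntimeException("disk full");
    // Constructor-style chaining: the cause is attached up front.
    IOException wrapped = new IOException("cannot get log writer", root);
    System.out.println(wrapped.getCause());   // java.lang.RuntimeException: disk full
    try {
      wrapped.initCause(root);                // a cause may be set at most once
    } catch (IllegalStateException expected) {
      System.out.println("initCause rejected: " + expected);
    }
  }
}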

src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java

@@ -140,6 +140,118 @@ public class TestHRegion extends HBaseTestCase {
   // /tmp/testtable
   //////////////////////////////////////////////////////////////////////////////

+  public void testSkipRecoveredEditsReplay() throws Exception {
+    String method = "testSkipRecoveredEditsReplay";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    Configuration conf = HBaseConfiguration.create();
+    initHRegion(tableName, method, conf, family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+    long maxSeqId = 1050;
+    long minSeqId = 1000;
+
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+      fs.create(recoveredEdits);
+      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+      long time = System.nanoTime();
+      WALEdit edit = new WALEdit();
+      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+          time, KeyValue.Type.Put, Bytes.toBytes(i)));
+      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+      writer.close();
+    }
+    MonitoredTask status = TaskMonitor.get().createStatus(method);
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
+    assertEquals(maxSeqId, seqId);
+    Get get = new Get(row);
+    Result result = region.get(get, null);
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+      assertEquals(1, kvs.size());
+      assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+    }
+  }
+
+  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
+    String method = "testSkipRecoveredEditsReplaySomeIgnored";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    initHRegion(tableName, method, HBaseConfiguration.create(), family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+    long maxSeqId = 1050;
+    long minSeqId = 1000;
+
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+      fs.create(recoveredEdits);
+      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+      long time = System.nanoTime();
+      WALEdit edit = new WALEdit();
+      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+          time, KeyValue.Type.Put, Bytes.toBytes(i)));
+      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+      writer.close();
+    }
+    long recoverSeqId = 1030;
+    MonitoredTask status = TaskMonitor.get().createStatus(method);
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
+    assertEquals(maxSeqId, seqId);
+    Get get = new Get(row);
+    Result result = region.get(get, null);
+    for (long i = minSeqId; i <= maxSeqId; i += 10) {
+      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+      if (i < recoverSeqId) {
+        assertEquals(0, kvs.size());
+      } else {
+        assertEquals(1, kvs.size());
+        assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+      }
+    }
+  }
+
+  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
+    String method = "testSkipRecoveredEditsReplayAllIgnored";
+    byte[] tableName = Bytes.toBytes(method);
+    byte[] family = Bytes.toBytes("family");
+    initHRegion(tableName, method, HBaseConfiguration.create(), family);
+    Path regiondir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+
+    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+    for (int i = 1000; i < 1050; i += 10) {
+      Path recoveredEdits = new Path(
+          recoveredEditsDir, String.format("%019d", i));
+      FSDataOutputStream dos = fs.create(recoveredEdits);
+      dos.writeInt(i);
+      dos.close();
+    }
+    long minSeqId = 2000;
+    Path recoveredEdits = new Path(
+        recoveredEditsDir, String.format("%019d", minSeqId-1));
+    FSDataOutputStream dos = fs.create(recoveredEdits);
+    dos.close();
+    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
+    assertEquals(minSeqId, seqId);
+  }
+
   public void testGetWhileRegionClose() throws IOException {
     Configuration hc = initSplit();
     int numRows = 100;
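
The recovered-edits files created above are named by zero-padding the edit's sequence id to 19 digits, so lexicographic listing order matches numeric order and replay can skip any file whose encoded sequence id is at or below what the region's store files already contain. A standalone sketch of that naming and skipping rule (hypothetical helper, not the HBase implementation):

import java.util.TreeSet;

public class RecoveredEditsNaming {
  // 19 digits covers the largest positive long (9223372036854775807),
  // so zero-padding never truncates and string order equals numeric order.
  static String name(long seqId) {
    return String.format("%019d", seqId);
  }

  public static void main(String[] args) {
    TreeSet<String> files = new TreeSet<String>();  // sorted lexicographically
    for (long i = 1000; i <= 1050; i += 10) {
      files.add(name(i));
    }
    long minSeqIdKept = 1029;  // edits at or below this are already in the stores
    for (String f : files) {
      long seqId = Long.parseLong(f);
      System.out.println(f + (seqId <= minSeqIdKept ? "  skipped" : "  replayed"));
    }
  }
}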
@@ -2828,115 +2940,6 @@ public class TestHRegion extends HBaseTestCase {
     region.get(g, null);
   }

-  public void testSkipRecoveredEditsReplay() throws Exception {
-    String method = "testSkipRecoveredEditsReplay";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    Configuration conf = HBaseConfiguration.create();
-    initHRegion(tableName, method, conf, family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
-    long maxSeqId = 1050;
-    long minSeqId = 1000;
-
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
-      long time = System.nanoTime();
-      WALEdit edit = new WALEdit();
-      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
-          time, KeyValue.Type.Put, Bytes.toBytes(i)));
-      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
-          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
-      writer.close();
-    }
-    MonitoredTask status = TaskMonitor.get().createStatus(method);
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
-    assertEquals(maxSeqId, seqId);
-    Get get = new Get(row);
-    Result result = region.get(get, null);
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
-      assertEquals(1, kvs.size());
-      assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
-    }
-  }
-
-  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplaySomeIgnored";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    initHRegion(tableName, method, HBaseConfiguration.create(), family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
-    long maxSeqId = 1050;
-    long minSeqId = 1000;
-
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-      HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
-      long time = System.nanoTime();
-      WALEdit edit = new WALEdit();
-      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
-          time, KeyValue.Type.Put, Bytes.toBytes(i)));
-      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
-          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
-      writer.close();
-    }
-    long recoverSeqId = 1030;
-    MonitoredTask status = TaskMonitor.get().createStatus(method);
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
-    assertEquals(maxSeqId, seqId);
-    Get get = new Get(row);
-    Result result = region.get(get, null);
-    for (long i = minSeqId; i <= maxSeqId; i += 10) {
-      List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
-      if (i < recoverSeqId) {
-        assertEquals(0, kvs.size());
-      } else {
-        assertEquals(1, kvs.size());
-        assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
-      }
-    }
-  }
-
-  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplayAllIgnored";
-    byte[] tableName = Bytes.toBytes(method);
-    byte[] family = Bytes.toBytes("family");
-    initHRegion(tableName, method, HBaseConfiguration.create(), family);
-    Path regiondir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
-
-    Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-    for (int i = 1000; i < 1050; i += 10) {
-      Path recoveredEdits = new Path(
-          recoveredEditsDir, String.format("%019d", i));
-      FSDataOutputStream dos = fs.create(recoveredEdits);
-      dos.writeInt(i);
-      dos.close();
-    }
-    long minSeqId = 2000;
-    Path recoveredEdits = new Path(
-        recoveredEditsDir, String.format("%019d", minSeqId-1));
-    FSDataOutputStream dos = fs.create(recoveredEdits);
-    dos.close();
-    long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
-    assertEquals(minSeqId, seqId);
-  }
-
   public void testIndexesScanWithOneDeletedRow() throws IOException {
     byte[] tableName = Bytes.toBytes("testIndexesScanWithOneDeletedRow");
     byte[] family = Bytes.toBytes("family");

src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java

@@ -589,6 +589,14 @@ public class TestStore extends TestCase {
           overwrite, bufferSize, replication, blockSize, progress), faultPos);
     }
+
+    @Override
+    public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
+        int bufferSize, short replication, long blockSize, Progressable progress)
+        throws IOException {
+      // Fake it.  Call create instead.  The default implementation throws an IOE
+      // that this is not supported.
+      return create(f, overwrite, bufferSize, replication, blockSize, progress);
+    }
   }

   static class FaultyOutputStream extends FSDataOutputStream {
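
For background on the override above: FileSystem.createNonRecursive creates a file only when its parent directory already exists, and the Hadoop 1.0 base-class implementation simply throws an IOException saying the operation is unsupported, which is why a wrapping test filesystem has to provide its own. A caller-side sketch under the stock Hadoop 1.x API (buffer size, replication, and block size are arbitrary demo values, and falling back on any IOException is a simplification):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NonRecursiveCreate {
  // Try the non-recursive create (fails if the parent is missing); fall back
  // to the recursive create() on filesystems that do not support it.
  static FSDataOutputStream open(FileSystem fs, Path path) throws IOException {
    try {
      return fs.createNonRecursive(path, true, 4096, (short) 3, 64 * 1024 * 1024, null);
    } catch (IOException unsupported) {
      return fs.create(path, true, 4096, (short) 3, 64 * 1024 * 1024, null);
    }
  }
}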

src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java

@@ -19,24 +19,27 @@
  */
 package org.apache.hadoop.hbase.replication;

+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;

 @Category(MediumTests.class)
 public class TestReplicationSource {
@@ -44,7 +47,7 @@ public class TestReplicationSource {
       LogFactory.getLog(TestReplicationSource.class);
   private final static HBaseTestingUtility TEST_UTIL =
       new HBaseTestingUtility();
-  private static FileSystem fs;
+  private static FileSystem FS;
   private static Path oldLogDir;
   private static Path logDir;
   private static Configuration conf = HBaseConfiguration.create();
@@ -55,11 +58,13 @@
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniDFSCluster(1);
-    fs = TEST_UTIL.getDFSCluster().getFileSystem();
-    oldLogDir = new Path(fs.getHomeDirectory(),
+    FS = TEST_UTIL.getDFSCluster().getFileSystem();
+    oldLogDir = new Path(FS.getHomeDirectory(),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    logDir = new Path(fs.getHomeDirectory(),
+    if (FS.exists(oldLogDir)) FS.delete(oldLogDir, true);
+    logDir = new Path(FS.getHomeDirectory(),
         HConstants.HREGION_LOGDIR_NAME);
+    if (FS.exists(logDir)) FS.delete(logDir, true);
   }

   /**
@@ -71,7 +76,9 @@
   @Test
   public void testLogMoving() throws Exception{
     Path logPath = new Path(logDir, "log");
-    HLog.Writer writer = HLog.createWriter(fs, logPath, conf);
+    if (!FS.exists(logDir)) FS.mkdirs(logDir);
+    if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
+    HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
     for(int i = 0; i < 3; i++) {
       byte[] b = Bytes.toBytes(Integer.toString(i));
       KeyValue kv = new KeyValue(b,b,b);
@@ -83,12 +90,12 @@
     }
     writer.close();

-    HLog.Reader reader = HLog.getReader(fs, logPath, conf);
+    HLog.Reader reader = HLog.getReader(FS, logPath, conf);
     HLog.Entry entry = reader.next();
     assertNotNull(entry);

     Path oldLogPath = new Path(oldLogDir, "log");
-    fs.rename(logPath, oldLogPath);
+    FS.rename(logPath, oldLogPath);

     entry = reader.next();
     assertNotNull(entry);
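
The final assertions rely on a reader that stays usable while the log it has open is renamed into the old-logs directory; replication sources depend on this when a WAL is archived mid-read. The same behavior can be demonstrated on a local POSIX filesystem with plain java.nio (a hypothetical standalone demo, not an HDFS test; on Windows renaming an open file may fail):

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class ReadAcrossRename {
  public static void main(String[] args) throws IOException {
    Path log = Files.createTempFile("log", ".txt");
    Files.write(log, "entry-0\nentry-1\n".getBytes(StandardCharsets.UTF_8));
    BufferedReader reader = Files.newBufferedReader(log, StandardCharsets.UTF_8);
    System.out.println(reader.readLine());             // entry-0
    Path moved = log.resolveSibling("log.old");
    Files.move(log, moved, StandardCopyOption.REPLACE_EXISTING);  // rename underneath
    System.out.println(reader.readLine());             // entry-1 is still readable
    reader.close();
    Files.delete(moved);
  }
}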