HBASE-492 hbase TRUNK does not build against hadoop TRUNK
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@634168 13f79535-47bb-0310-9956-ffa450edef68
parent 36b5f4791f
commit a0f73c7efd
@@ -29,6 +29,7 @@ Hbase Change Log
    HBASE-473   When a table is deleted, master sends multiple close messages to
                the region server
    HBASE-490   Doubly-assigned .META.; master uses one and clients another
+   HBASE-492   hbase TRUNK does not build against hadoop TRUNK
 
 IMPROVEMENTS
    HBASE-415   Rewrite leases to use DelayedBlockingQueue instead of polling
BIN  lib/hadoop-0.17.0-dev.2008-02-07_12-01-58-test.jar → lib/hadoop-0.17.0-dev.2008-03-04_15-19-00-test.jar  (binary file not shown; file mode normal → executable)
@@ -107,4 +107,8 @@
     Keep the maximum filesize small so we split more often in tests.
     </description>
   </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/logs</value>
+  </property>
 </configuration>
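Note on the property added above: Hadoop's Configuration expands ${...} tokens when a value is read, substituting matching JVM system properties (and other configuration keys), so ${user.dir}/logs resolves to a logs directory under the test JVM's working directory. A minimal sketch of that resolution; the resource name "hbase-site.xml" is an assumption, since the hunk does not show which test configuration file it touches:

    import org.apache.hadoop.conf.Configuration;

    /** Sketch only: shows how the ${user.dir} token above gets expanded. */
    public class LogDirCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed resource name; the hunk does not show the file's path.
        conf.addResource("hbase-site.xml");
        // get() substitutes the JVM system property user.dir, yielding
        // something like /path/to/working/dir/logs.
        System.out.println(conf.get("hadoop.log.dir"));
      }
    }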
@@ -42,6 +42,15 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 public class MultiRegionTable extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(MultiRegionTable.class.getName());
 
+  /** {@inheritDoc} */
+  @Override
+  public void setUp() throws Exception {
+    // These are needed for the new and improved Map/Reduce framework
+    System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
+    conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
+    super.setUp();
+  }
+
   /**
    * Make a multi-region table. Presumption is that table already exists and
    * that there is only one regionserver. Makes it multi-region by filling with
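Note on the setUp() addition above: the presumption behind its in-code comment is that the newer Hadoop MapReduce machinery resolves its log directory from the hadoop.log.dir JVM system property rather than from the job Configuration, so a test that launches map/reduce jobs mirrors the configured value into a system property and points mapred.output.dir somewhere writable before the cluster comes up. A self-contained, hypothetical base class illustrating the same pattern (the class name and default paths are illustrative, not from the patch):

    import junit.framework.TestCase;

    import org.apache.hadoop.hbase.HBaseConfiguration;

    /** Hypothetical sketch of the property propagation done in setUp() above. */
    public abstract class MapReduceTestBase extends TestCase {
      protected final HBaseConfiguration conf = new HBaseConfiguration();

      @Override
      protected void setUp() throws Exception {
        // Task-side logging reads a JVM system property, not the
        // Configuration, so mirror the configured value before any job runs.
        if (System.getProperty("hadoop.log.dir") == null) {
          System.setProperty("hadoop.log.dir",
              conf.get("hadoop.log.dir", "build/test/logs"));
        }
        // Give the local job runner a writable default output location.
        conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir", "build/test/tmp"));
        super.setUp();
      }
    }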
@@ -187,7 +196,7 @@ public class MultiRegionTable extends HBaseTestCase {
     if (splitB == null) {
       LOG.info("splitB was already null. Assuming it was previously compacted.");
     } else {
-      LOG.info("Daughter splitB: " + splitA.getRegionName());
+      LOG.info("Daughter splitB: " + splitB.getRegionName());
 
       // Call second split.
       compact(cluster, splitB);
@@ -161,8 +161,8 @@ public class PerformanceEvaluation implements HConstants {
     }
 
     /** {@inheritDoc} */
-    public void map(@SuppressWarnings("unused") final WritableComparable key,
-      final Writable value, final OutputCollector output,
+    public void map(@SuppressWarnings("unused") final Object key,
+      final Object value, final OutputCollector output,
       final Reporter reporter)
     throws IOException {
       Matcher m = LINE_PATTERN.matcher(((Text)value).toString());
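Note on the signature change above: it reads as a consequence of type erasure on the pre-0.20 org.apache.hadoop.mapred.Mapper interface. Once hadoop TRUNK stopped bounding the key/value type parameters by WritableComparable/Writable (presumably as part of the serialization changes heading into 0.17), a class implementing the raw Mapper interface must declare the erased parameter types, i.e. Object, and recover the concrete types by casting, which this method body already does with ((Text) value). A standalone sketch of that raw-implementation pattern (class name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    /** Illustrative raw (non-parameterized) Mapper with the erased signature. */
    @SuppressWarnings("unchecked")
    public class RawLineMapper implements Mapper {
      public void configure(JobConf job) {
        // No per-job configuration needed for this sketch.
      }

      public void close() throws IOException {
        // Nothing to release.
      }

      public void map(final Object key, final Object value,
          final OutputCollector output, final Reporter reporter)
          throws IOException {
        // Cast back to the concrete input type before using it.
        String line = ((Text) value).toString();
        reporter.setStatus("mapping a line of " + line.length() + " chars");
        output.collect(key, new Text(line.trim()));
      }
    }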