HBASE-1526 mapreduce fixup

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@785008 13f79535-47bb-0310-9956-ffa450edef68
Andrew Kyle Purtell 2009-06-15 22:20:55 +00:00
parent 76d4ac4d07
commit e8d66ea285
3 changed files with 19 additions and 16 deletions

CHANGES.txt

@@ -88,7 +88,9 @@ Release 0.20.0 - Unreleased
    HBASE-1310 Off by one error in Bytes.vintToBytes
    HBASE-1202 getRow does not always work when specifying number of versions
    HBASE-1324 hbase-1234 broke testget2 unit test (and broke the build)
-   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable HBASE-1330 binary keys broken on trunk (Ryan Rawson via Stack) HBASE-1332 regionserver carrying .META. starts sucking all cpu, drives load
+   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable
+   HBASE-1330 binary keys broken on trunk (Ryan Rawson via Stack)
+   HBASE-1332 regionserver carrying .META. starts sucking all cpu, drives load
               up - infinite loop? (Ryan Rawson via Stack)
    HBASE-1334 .META. region running into hfile errors (Ryan Rawson via Stack)
    HBASE-1338 lost use of compaction.dir; we were compacting into live store
@@ -168,8 +170,10 @@ Release 0.20.0 - Unreleased
    HBASE-1493 New TableMapReduceUtil methods should be static (Billy Pearson
               via Andrew Purtell)
    HBASE-1486 BLOCKCACHE always on even when disabled (Lars George via Stack)
-   HBASE-1491 ZooKeeper errors: "Client has seen zxid 0xe our last zxid is 0xd"
-   HBASE-1499 Fix javadoc warnings after HBASE-1304 commit (Lars George via Stack)
+   HBASE-1491 ZooKeeper errors: "Client has seen zxid 0xe our last zxid
+              is 0xd"
+   HBASE-1499 Fix javadoc warnings after HBASE-1304 commit (Lars George via
+              Stack)
    HBASE-1504 Remove left-over debug from 1304 commit
    HBASE-1518 Delete Trackers using compareRow, should just use raw
               binary comparator (Jon Gray via Stack)
@@ -181,7 +185,7 @@ Release 0.20.0 - Unreleased
    HBASE-1522 We delete splits before their time occasionally
    HBASE-1523 NPE in BaseScanner
    HBASE-1525 HTable.incrementColumnValue hangs()
+   HBASE-1526 mapreduce fixup
 
   IMPROVEMENTS
    HBASE-1089 Add count of regions on filesystem to master UI; add percentage

TableInputFormatBase.java

@@ -75,7 +75,7 @@ import org.apache.hadoop.util.StringUtils;
  * </pre>
  */
 public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+implements InputFormat<ImmutableBytesWritable, RowResult> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
   private byte [][] inputColumns;
   private HTable table;
@@ -86,7 +86,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * Iterate over an HBase table data, return (Text, RowResult) pairs
    */
   protected class TableRecordReader
-  implements RecordReader<ImmutableBytesWritable, Result> {
+  implements RecordReader<ImmutableBytesWritable, RowResult> {
     private byte [] startRow;
     private byte [] endRow;
     private byte [] lastRow;
@@ -189,8 +189,8 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public Result createValue() {
-      return new Result();
+    public RowResult createValue() {
+      return new RowResult();
     }
 
     public long getPos() {
@@ -210,7 +210,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(ImmutableBytesWritable key, Result value)
+    public boolean next(ImmutableBytesWritable key, RowResult value)
     throws IOException {
       Result result;
       try {
@@ -225,7 +225,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
       if (result != null && result.size() > 0) {
         key.set(result.getRow());
         lastRow = key.get();
-        Writables.copyWritable(result, value);
+        Writables.copyWritable(result.getRowResult(), value);
         return true;
       }
       return false;
@@ -239,7 +239,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    *   JobConf, Reporter)
    */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
+  public RecordReader<ImmutableBytesWritable, RowResult> getRecordReader(
     InputSplit split, JobConf job, Reporter reporter)
   throws IOException {
     TableSplit tSplit = (TableSplit) split;
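
The net effect in TableInputFormatBase is that the deprecated mapred-package reader once again hands tasks (ImmutableBytesWritable, RowResult) pairs, converting from the new-API Result internally via result.getRowResult(). Below is a minimal sketch, not part of this commit, of a map task written against that restored contract; the class name and the "info:name" column are hypothetical, and the old-style Mapper/MapReduceBase signatures are assumed from the Hadoop 0.20-era org.apache.hadoop.mapred API.

// Minimal sketch (see caveats above): a map task consuming the
// (ImmutableBytesWritable, RowResult) pairs this input format now emits.
import java.io.IOException;

import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class NameFilterMap extends MapReduceBase
    implements Mapper<ImmutableBytesWritable, RowResult,
        ImmutableBytesWritable, RowResult> {

  public void map(ImmutableBytesWritable key, RowResult value,
      OutputCollector<ImmutableBytesWritable, RowResult> output,
      Reporter reporter) throws IOException {
    // RowResult behaves as a SortedMap<byte[], Cell>, so a column is
    // looked up by its full "family:qualifier" name as bytes.
    Cell cell = value.get(Bytes.toBytes("info:name"));
    if (cell != null) {
      // Pass the whole row through when the column is present.
      output.collect(key, value);
    }
  }
}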

TableOutputFormat.java

@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -41,7 +40,7 @@ import org.apache.hadoop.util.Progressable;
  * Convert Map/Reduce output and write it to an HBase table
  */
 public class TableOutputFormat extends
-FileOutputFormat<ImmutableBytesWritable, Put> {
+FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
@@ -52,7 +51,7 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
    * and write to an HBase table
    */
   protected static class TableRecordWriter
-    implements RecordWriter<ImmutableBytesWritable, Put> {
+    implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
     private HTable m_table;
 
     /**
@@ -70,8 +69,8 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
     }
 
     public void write(ImmutableBytesWritable key,
-        Put value) throws IOException {
-      m_table.put(new Put(value));
+        BatchUpdate value) throws IOException {
+      m_table.commit(new BatchUpdate(value));
     }
   }
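
On the output side, TableOutputFormat again accepts (ImmutableBytesWritable, BatchUpdate) pairs, and its TableRecordWriter commits each value with HTable.commit(). Below is a minimal sketch, not part of this commit, of a reduce that builds such values; the class name and the "info:count" column are hypothetical, and the old-style Reducer signature is assumed from the same Hadoop 0.20-era org.apache.hadoop.mapred API.

// Minimal sketch (see caveats above): a reduce emitting the
// (ImmutableBytesWritable, BatchUpdate) pairs this output format expects.
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class UploadReduce extends MapReduceBase
    implements Reducer<ImmutableBytesWritable, ImmutableBytesWritable,
        ImmutableBytesWritable, BatchUpdate> {

  public void reduce(ImmutableBytesWritable key,
      Iterator<ImmutableBytesWritable> values,
      OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
      Reporter reporter) throws IOException {
    // A BatchUpdate gathers all edits destined for a single row.
    BatchUpdate bu = new BatchUpdate(key.get());
    while (values.hasNext()) {
      bu.put(Bytes.toBytes("info:count"), values.next().get());
    }
    // TableRecordWriter.write() will commit this row to the table.
    output.collect(key, bu);
  }
}

To route the output to a table, the job would set TableOutputFormat as its output format and name the target table through the OUTPUT_TABLE ("hbase.mapred.outputtable") property shown in the diff above.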