HBASE-1526 mapreduce fixup
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@785008 13f79535-47bb-0310-9956-ffa450edef68

commit e8d66ea285
parent 76d4ac4d07

CHANGES.txt (12 lines changed)

@@ -88,7 +88,9 @@ Release 0.20.0 - Unreleased
    HBASE-1310 Off by one error in Bytes.vintToBytes
    HBASE-1202 getRow does not always work when specifying number of versions
    HBASE-1324 hbase-1234 broke testget2 unit test (and broke the build)
-   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable HBASE-1330 binary keys broken on trunk (Ryan Rawson via Stack) HBASE-1332 regionserver carrying .META. starts sucking all cpu, drives load
+   HBASE-1321 hbase-1234 broke TestCompaction; fix and reenable
+   HBASE-1330 binary keys broken on trunk (Ryan Rawson via Stack)
+   HBASE-1332 regionserver carrying .META. starts sucking all cpu, drives load
               up - infinite loop? (Ryan Rawson via Stack)
    HBASE-1334 .META. region running into hfile errors (Ryan Rawson via Stack)
    HBASE-1338 lost use of compaction.dir; we were compacting into live store
@@ -168,8 +170,10 @@ Release 0.20.0 - Unreleased
    HBASE-1493 New TableMapReduceUtil methods should be static (Billy Pearson
               via Andrew Purtell)
    HBASE-1486 BLOCKCACHE always on even when disabled (Lars George via Stack)
-   HBASE-1491 ZooKeeper errors: "Client has seen zxid 0xe our last zxid is 0xd"
-   HBASE-1499 Fix javadoc warnings after HBASE-1304 commit (Lars George via Stack)
+   HBASE-1491 ZooKeeper errors: "Client has seen zxid 0xe our last zxid
+              is 0xd"
+   HBASE-1499 Fix javadoc warnings after HBASE-1304 commit (Lars George via
+              Stack)
    HBASE-1504 Remove left-over debug from 1304 commit
    HBASE-1518 Delete Trackers using compareRow, should just use raw
               binary comparator (Jon Gray via Stack)
@@ -181,7 +185,7 @@ Release 0.20.0 - Unreleased
    HBASE-1522 We delete splits before their time occasionally
    HBASE-1523 NPE in BaseScanner
    HBASE-1525 HTable.incrementColumnValue hangs()
-
+   HBASE-1526 mapreduce fixup

 IMPROVEMENTS
    HBASE-1089 Add count of regions on filesystem to master UI; add percentage

TableInputFormatBase.java

@@ -75,7 +75,7 @@ import org.apache.hadoop.util.StringUtils;
  * </pre>
  */
 public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+implements InputFormat<ImmutableBytesWritable, RowResult> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
   private byte [][] inputColumns;
   private HTable table;

@@ -86,7 +86,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * Iterate over an HBase table data, return (Text, RowResult) pairs
    */
   protected class TableRecordReader
-  implements RecordReader<ImmutableBytesWritable, Result> {
+  implements RecordReader<ImmutableBytesWritable, RowResult> {
     private byte [] startRow;
     private byte [] endRow;
     private byte [] lastRow;

@@ -189,8 +189,8 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public Result createValue() {
-      return new Result();
+    public RowResult createValue() {
+      return new RowResult();
     }

     public long getPos() {

@@ -210,7 +210,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(ImmutableBytesWritable key, Result value)
+    public boolean next(ImmutableBytesWritable key, RowResult value)
     throws IOException {
       Result result;
       try {

@@ -225,7 +225,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
         if (result != null && result.size() > 0) {
           key.set(result.getRow());
           lastRow = key.get();
-          Writables.copyWritable(result, value);
+          Writables.copyWritable(result.getRowResult(), value);
           return true;
         }
         return false;

@@ -239,7 +239,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    * JobConf, Reporter)
    */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
+  public RecordReader<ImmutableBytesWritable, RowResult> getRecordReader(
     InputSplit split, JobConf job, Reporter reporter)
   throws IOException {
     TableSplit tSplit = (TableSplit) split;
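
For reference (not part of this commit), a map task written against the restored mapred API consumes exactly the (ImmutableBytesWritable, RowResult) pairs this InputFormat now hands out. A minimal sketch, assuming invented column names info:name and copy:name:

import java.io.IOException;

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Sketch only, not part of HBASE-1526: copies one invented column
// ("info:name") to another ("copy:name") in the same row.
public class CopyColumnMap extends MapReduceBase
implements TableMap<ImmutableBytesWritable, BatchUpdate> {
  public void map(ImmutableBytesWritable key, RowResult value,
      OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
      Reporter reporter) throws IOException {
    // RowResult is the row's column -> Cell map.
    Cell cell = value.get(Bytes.toBytes("info:name"));
    if (cell == null) {
      return; // row has no source column; emit nothing
    }
    // BatchUpdate batches the writes for a single row.
    BatchUpdate bu = new BatchUpdate(key.get());
    bu.put(Bytes.toBytes("copy:name"), cell.getValue());
    output.collect(key, bu);
  }
}
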
TableOutputFormat.java

@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;

@@ -41,7 +40,7 @@ import org.apache.hadoop.util.Progressable;
  * Convert Map/Reduce output and write it to an HBase table
  */
 public class TableOutputFormat extends
-FileOutputFormat<ImmutableBytesWritable, Put> {
+FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {

   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";

@@ -52,7 +51,7 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
    * and write to an HBase table
    */
   protected static class TableRecordWriter
-    implements RecordWriter<ImmutableBytesWritable, Put> {
+    implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
     private HTable m_table;

     /**

@@ -70,8 +69,8 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
     }

     public void write(ImmutableBytesWritable key,
-        Put value) throws IOException {
-      m_table.put(new Put(value));
+        BatchUpdate value) throws IOException {
+      m_table.commit(new BatchUpdate(value));
     }
   }
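
And a sketch of the JobConf wiring that ties the two restored formats together, again not part of this commit: table names source_table and target_table are invented, CopyColumnMap is the sketch above, and TableMapReduceUtil here is the mapred-package helper:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

// Sketch only, not part of HBASE-1526: a map-only job from one table
// to another using the RowResult/BatchUpdate mapred API.
public class CopyColumnJob {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(new HBaseConfiguration(), CopyColumnJob.class);
    job.setJobName("copycolumn");
    // Scan "source_table", handing (ImmutableBytesWritable, RowResult)
    // pairs for the info:name column to the mapper sketched above.
    TableMapReduceUtil.initTableMapJob("source_table", "info:name",
        CopyColumnMap.class, ImmutableBytesWritable.class,
        BatchUpdate.class, job);
    // Send the mapper's BatchUpdates straight to "target_table".
    job.setOutputFormat(TableOutputFormat.class);
    job.set(TableOutputFormat.OUTPUT_TABLE, "target_table");
    job.setNumReduceTasks(0);
    JobClient.runJob(job);
  }
}

With zero reduce tasks the mapper's BatchUpdates flow directly through TableOutputFormat's TableRecordWriter, i.e. the m_table.commit(...) call in the hunk above.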