diff --git a/CHANGES.txt b/CHANGES.txt
index 9be080af66d..7cfca1dfbd5 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -88,7 +88,9 @@ Release 0.20.0 - Unreleased
    HBASE-1310  Off by one error in Bytes.vintToBytes
    HBASE-1202  getRow does not always work when specifying number of versions
    HBASE-1324  hbase-1234 broke testget2 unit test (and broke the build)
-   HBASE-1321  hbase-1234 broke TestCompaction; fix and reenable HBASE-1330 binary keys broken on trunk (Ryan Rawson via Stack) HBASE-1332 regionserver carrying .META. starts sucking all cpu, drives load
+   HBASE-1321  hbase-1234 broke TestCompaction; fix and reenable
+   HBASE-1330  binary keys broken on trunk (Ryan Rawson via Stack)
+   HBASE-1332  regionserver carrying .META. starts sucking all cpu, drives load
                up - infinite loop? (Ryan Rawson via Stack)
    HBASE-1334  .META. region running into hfile errors (Ryan Rawson via Stack)
    HBASE-1338  lost use of compaction.dir; we were compacting into live store
@@ -168,8 +170,10 @@ Release 0.20.0 - Unreleased
    HBASE-1493  New TableMapReduceUtil methods should be static (Billy Pearson
                via Andrew Purtell)
    HBASE-1486  BLOCKCACHE always on even when disabled (Lars George via Stack)
-   HBASE-1491  ZooKeeper errors: "Client has seen zxid 0xe our last zxid is 0xd"
-   HBASE-1499  Fix javadoc warnings after HBASE-1304 commit (Lars George via Stack)
+   HBASE-1491  ZooKeeper errors: "Client has seen zxid 0xe our last zxid
+               is 0xd"
+   HBASE-1499  Fix javadoc warnings after HBASE-1304 commit (Lars George via
+               Stack)
    HBASE-1504  Remove left-over debug from 1304 commit
    HBASE-1518  Delete Trackers using compareRow, should just use raw binary
                comparator (Jon Gray via Stack)
@@ -181,7 +185,7 @@ Release 0.20.0 - Unreleased
    HBASE-1522  We delete splits before their time occasionally
    HBASE-1523  NPE in BaseScanner
    HBASE-1525  HTable.incrementColumnValue hangs()
-
+   HBASE-1526  mapreduce fixup
 
 IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index cf92960c3f4..34f74fd6e30 100644
--- a/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -75,7 +75,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  */
 public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+implements InputFormat<ImmutableBytesWritable, RowResult> {
   final Log LOG = LogFactory.getLog(TableInputFormatBase.class);
   private byte [][] inputColumns;
   private HTable table;
@@ -86,7 +86,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * Iterate over an HBase table data, return (Text, RowResult) pairs
    */
   protected class TableRecordReader
-  implements RecordReader<ImmutableBytesWritable, Result> {
+  implements RecordReader<ImmutableBytesWritable, RowResult> {
     private byte [] startRow;
     private byte [] endRow;
     private byte [] lastRow;
@@ -189,8 +189,8 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      *
      * @see org.apache.hadoop.mapred.RecordReader#createValue()
      */
-    public Result createValue() {
-      return new Result();
+    public RowResult createValue() {
+      return new RowResult();
     }
 
     public long getPos() {
@@ -210,7 +210,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
      * @return true if there was more data
      * @throws IOException
      */
-    public boolean next(ImmutableBytesWritable key, Result value)
+    public boolean next(ImmutableBytesWritable key, RowResult value)
     throws IOException {
       Result result;
       try {
@@ -225,7 +225,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
       if (result != null && result.size() > 0) {
         key.set(result.getRow());
         lastRow = key.get();
-        Writables.copyWritable(result, value);
+        Writables.copyWritable(result.getRowResult(), value);
         return true;
       }
       return false;
     }
@@ -239,7 +239,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    *
    * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    *      JobConf, Reporter)
    */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
+  public RecordReader<ImmutableBytesWritable, RowResult> getRecordReader(
     InputSplit split, JobConf job, Reporter reporter)
   throws IOException {
     TableSplit tSplit = (TableSplit) split;
diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index 9bff33b1ab1..26d40fe3a35 100644
--- a/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -41,7 +40,7 @@ import org.apache.hadoop.util.Progressable;
  * Convert Map/Reduce output and write it to an HBase table
  */
 public class TableOutputFormat extends
-FileOutputFormat<ImmutableBytesWritable, Put> {
+FileOutputFormat<ImmutableBytesWritable, BatchUpdate> {
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
@@ -52,7 +51,7 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
    * and write to an HBase table
    */
   protected static class TableRecordWriter
-    implements RecordWriter<ImmutableBytesWritable, Put> {
+    implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
     private HTable m_table;
 
     /**
@@ -70,8 +69,8 @@ FileOutputFormat<ImmutableBytesWritable, Put> {
     }
 
     public void write(ImmutableBytesWritable key,
-        Put value) throws IOException {
-      m_table.put(new Put(value));
+        BatchUpdate value) throws IOException {
+      m_table.commit(new BatchUpdate(value));
     }
   }
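
For context, and not part of the patch above: after this revert, old-API mapred jobs read RowResult values from TableInputFormat and emit BatchUpdate values to TableOutputFormat, whose TableRecordWriter commits them with HTable.commit(). The sketch below is a minimal reducer under those assumptions; the class name and the "info:processed" column are hypothetical, chosen only for illustration.

/*
 * Illustrative sketch only: a reducer producing (ImmutableBytesWritable,
 * BatchUpdate) pairs for TableOutputFormat with the pre-HBASE-1304 writables.
 */
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class ExampleTableReduce extends MapReduceBase
implements Reducer<ImmutableBytesWritable, RowResult, ImmutableBytesWritable, BatchUpdate> {

  public void reduce(ImmutableBytesWritable key, Iterator<RowResult> values,
      OutputCollector<ImmutableBytesWritable, BatchUpdate> output, Reporter reporter)
  throws IOException {
    // One BatchUpdate per row key; TableOutputFormat's TableRecordWriter
    // commits it to the configured output table.
    BatchUpdate bu = new BatchUpdate(key.get());
    bu.put(Bytes.toBytes("info:processed"), Bytes.toBytes("true"));
    output.collect(key, bu);
  }
}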