HBASE-3162 Add TimeRange support into Increment to optimize for counters that are partitioned on time

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1029118 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Gray 2010-10-30 16:58:44 +00:00
parent 8f3c1a5bc4
commit 5abb3867c0
3 changed files with 40 additions and 0 deletions
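For context, a minimal client-side sketch of what the new API enables, assuming the HTable client of that era; the table name "metrics", the family "c", the qualifier "hits", and the hourly bucketing are all hypothetical and not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimeBucketedCounterSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "metrics");            // hypothetical table
        Increment inc = new Increment(Bytes.toBytes("page#1"));
        inc.addColumn(Bytes.toBytes("c"), Bytes.toBytes("hits"), 1L);
        // Counter cells for this row are only written within the current hour,
        // so bound the read side of the increment to that bucket.
        long hourStart = (System.currentTimeMillis() / 3600000L) * 3600000L;
        inc.setTimeRange(hourStart, Long.MAX_VALUE);           // [minStamp, maxStamp)
        Result result = table.increment(inc);
        long count = Bytes.toLong(
            result.getValue(Bytes.toBytes("c"), Bytes.toBytes("hits")));
        System.out.println("hits this hour: " + count);
        table.close();
      }
    }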

CHANGES.txt

@@ -1065,6 +1065,8 @@ Release 0.21.0 - Unreleased
a RS no longer present
HBASE-3174 Add ability for Get operations to enable/disable use of block
caching
HBASE-3162 Add TimeRange support into Increment to optimize for counters
that are partitioned on time
NEW FEATURES
HBASE-1961 HBase EC2 scripts

Increment.java

@@ -27,6 +27,7 @@ import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
@@ -48,6 +49,7 @@ public class Increment implements Writable {
private byte [] row = null;
private long lockId = -1L;
private boolean writeToWAL = true;
private TimeRange tr = new TimeRange();
private Map<byte [], NavigableMap<byte [], Long>> familyMap =
new TreeMap<byte [], NavigableMap<byte [], Long>>(Bytes.BYTES_COMPARATOR);
@@ -143,6 +145,34 @@ public class Increment implements Writable {
return this;
}
/**
* Gets the TimeRange used for this increment.
* @return TimeRange
*/
public TimeRange getTimeRange() {
return this.tr;
}
/**
* Sets the TimeRange to be used on the Get for this increment.
* <p>
* This is useful when you have counters that only last for specific
* periods of time (i.e. counters that are partitioned by time). By setting
* the range of valid times for this increment, you can potentially gain
* some performance with a more optimal Get operation.
* <p>
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range
* @return this
*/
public Increment setTimeRange(long minStamp, long maxStamp)
throws IOException {
tr = new TimeRange(minStamp, maxStamp);
return this;
}
/**
* Method for retrieving the keys in the familyMap
* @return keys in the current familyMap
@@ -241,6 +271,8 @@ public class Increment implements Writable {
throw new IOException("unsupported version");
}
this.row = Bytes.readByteArray(in);
this.tr = new TimeRange();
tr.readFields(in);
this.lockId = in.readLong();
int numFamilies = in.readInt();
if (numFamilies == 0) {
@@ -270,6 +302,7 @@ public class Increment implements Writable {
throws IOException {
out.writeByte(INCREMENT_VERSION);
Bytes.writeByteArray(out, this.row);
tr.write(out);
out.writeLong(this.lockId);
if (familyMap.size() == 0) {
throw new IOException("At least one column required");

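As the setTimeRange javadoc above spells out, the range is applied as a half-open interval [minStamp, maxStamp): the lower bound is included, the upper bound is excluded, so a counter cell written exactly at maxStamp falls outside the range. A tiny illustrative sketch of that contract, assuming TimeRange's existing withinTimeRange(long) check; nothing below is part of this patch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.TimeRange;

    public class TimeRangeBoundsSketch {
      public static void main(String[] args) throws IOException {
        TimeRange tr = new TimeRange(10L, 20L);      // rejects maxStamp < minStamp
        System.out.println(tr.withinTimeRange(10L)); // true  -- minStamp is inclusive
        System.out.println(tr.withinTimeRange(19L)); // true
        System.out.println(tr.withinTimeRange(20L)); // false -- maxStamp is exclusive
      }
    }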
HRegion.java

@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -2946,6 +2947,8 @@ public class HRegion implements HeapSize { // , Writable{
newGet.addColumn(family, qualifier);
}
}
newGet.setTimeRange(get.getTimeRange().getMin(),
get.getTimeRange().getMax());
iscan = new InternalScan(newGet);
}
@@ -3002,6 +3005,7 @@ public class HRegion implements HeapSize { // , Writable{
// TODO: Use RWCC to make this set of increments atomic to reads
byte [] row = increment.getRow();
checkRow(row);
TimeRange tr = increment.getTimeRange();
boolean flush = false;
WALEdit walEdits = null;
List<KeyValue> allKVs = new ArrayList<KeyValue>(increment.numColumns());
@@ -3025,6 +3029,7 @@ public class HRegion implements HeapSize { // , Writable{
for (Map.Entry<byte [], Long> column : family.getValue().entrySet()) {
get.addColumn(family.getKey(), column.getKey());
}
get.setTimeRange(tr.getMin(), tr.getMax());
List<KeyValue> results = getLastIncrement(get);
// Iterate the input columns and update existing values if they were