HADOOP-2224 Add HTable.getRow(ROW, ts)

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@599879 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-11-30 17:18:01 +00:00
parent c22dd06d14
commit f1f6738e2c
5 changed files with 70 additions and 3 deletions

View File

@@ -71,6 +71,8 @@ Trunk (unreleased changes)
HADOOP-2296 hbase shell: phantom columns show up from select command
HADOOP-2297 System.exit() Handling in hbase shell jar command
(Edward Yoon via Stack)
HADOOP-2224 Add HTable.getRow(ROW, ts)
(Bryan Duxbury via Stack)
Release 0.15.1

View File

@@ -1017,7 +1017,25 @@ public class HRegion implements HConstants {
* @throws IOException
*/
public Map<Text, byte []> getFull(Text row) throws IOException {
- HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
+ return getFull(row, HConstants.LATEST_TIMESTAMP);
}
/**
* Fetch all the columns for the indicated row at a specified timestamp.
* Returns a TreeMap that maps column names to values.
*
* We should eventually use Bloom filters here, to reduce running time. If
* the database has many column families and is very sparse, then we could be
* checking many files needlessly. A small Bloom for each row would help us
* determine which column groups are useful for that row. That would let us
* avoid a bunch of disk activity.
*
* @param row row key
* @param ts timestamp
* @return Map<columnName, byte[]> values
* @throws IOException
*/
public Map<Text, byte []> getFull(Text row, long ts) throws IOException {
HStoreKey key = new HStoreKey(row, ts);
obtainRowLock(row);
try {
TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();

View File

@@ -99,6 +99,18 @@ public interface HRegionInterface extends VersionedProtocol {
public MapWritable getRow(final Text regionName, final Text row)
throws IOException;
/**
* Get all the data for the specified row at a given timestamp
*
* @param regionName region name
* @param row row key
* @param ts timestamp
* @return map of values
* @throws IOException
*/
public MapWritable getRow(final Text regionName, final Text row, final long ts)
throws IOException;
/**
* Applies a batch of updates via one RPC
*

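As a sketch of how a caller would exercise the new interface method directly (not part of the commit): the HRegionInterface proxy, region name, and row key are assumed to be in hand already, the way HTable obtains them through its connection and region-location logic, and the package locations are assumptions about this era of the tree.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInterface;   // package location assumed
import org.apache.hadoop.hbase.io.MapWritable;     // package location assumed
import org.apache.hadoop.io.Text;

public class GetRowRpcSketch {
  // Issues the same RPC that HTable.getRow(row, ts) makes once it has located
  // the region hosting the row.
  static MapWritable fetchRowAsOf(HRegionInterface server, Text regionName,
      Text row, long ts) throws IOException {
    return server.getRow(regionName, row, ts);
  }
}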
View File

@@ -1290,6 +1290,29 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
}
/** {@inheritDoc} */
public MapWritable getRow(final Text regionName, final Text row, final long ts)
throws IOException {
checkOpen();
requestCount.incrementAndGet();
try {
HRegion region = getRegion(regionName);
MapWritable result = new MapWritable();
Map<Text, byte[]> map = region.getFull(row, ts);
for (Map.Entry<Text, byte []> es: map.entrySet()) {
result.put(new HStoreKey(row, es.getKey()),
new ImmutableBytesWritable(es.getValue()));
}
return result;
} catch (IOException e) {
checkFileSystem();
throw e;
}
}
/** {@inheritDoc} */
public MapWritable next(final long scannerId) throws IOException {

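The server packs the row into a MapWritable keyed by HStoreKey, as the loop above shows. For completeness, here is a sketch (not part of the commit) of the inverse step a caller performs to recover a column-name-to-value map, mirroring what HTable does with the RPC result; the import locations are assumptions about this era of the contrib tree.

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;  // package location assumed
import org.apache.hadoop.hbase.io.MapWritable;             // package location assumed
import org.apache.hadoop.io.Text;

public class RowUnpackSketch {
  static SortedMap<Text, byte[]> toColumnMap(MapWritable rowData) {
    SortedMap<Text, byte[]> columns = new TreeMap<Text, byte[]>();
    for (Map.Entry<?, ?> e : rowData.entrySet()) {
      HStoreKey key = (HStoreKey) e.getKey();                     // carries row + column
      ImmutableBytesWritable value = (ImmutableBytesWritable) e.getValue();
      columns.put(key.getColumn(), value.get());                  // column name -> raw bytes
    }
    return columns;
  }
}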
View File

@@ -347,13 +347,24 @@ public class HTable implements HConstants {
}
/**
- * Get all the data for the specified row
+ * Get all the data for the specified row at the latest timestamp
*
* @param row row key
* @return map of columns to values
* @throws IOException
*/
public SortedMap<Text, byte[]> getRow(Text row) throws IOException {
return getRow(row, HConstants.LATEST_TIMESTAMP);
}
/**
* Get all the data for the specified row at a specified timestamp
*
* @param row row key
* @param ts timestamp
* @return map of columns to values
* @throws IOException
*/
public SortedMap<Text, byte[]> getRow(Text row, long ts) throws IOException {
checkClosed();
MapWritable value = null;
for (int tries = 0; tries < numRetries; tries++) {
@@ -362,7 +373,7 @@ public class HTable implements HConstants {
connection.getHRegionConnection(r.getServerAddress());
try {
- value = server.getRow(r.getRegionInfo().getRegionName(), row);
+ value = server.getRow(r.getRegionInfo().getRegionName(), row, ts);
break;
} catch (IOException e) {
@@ -396,6 +407,7 @@ public class HTable implements HConstants {
return results;
}
/**
* Get a scanner on the current table starting at the specified row.
* Return the specified columns.
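To close, a usage sketch of the feature this commit adds (HADOOP-2224): reading a row at the latest timestamp versus at an earlier point in time. Constructing the HTable (configuration and table name) is left out and assumed; the row key and the one-hour offset are illustrative.

import java.io.IOException;
import java.util.SortedMap;
import org.apache.hadoop.hbase.HTable;   // package location as of this era of the contrib tree
import org.apache.hadoop.io.Text;

public class GetRowAtTimestampSketch {
  static void compareVersions(HTable table) throws IOException {
    Text row = new Text("row1");

    // Existing behaviour: latest version of every column in the row.
    SortedMap<Text, byte[]> latest = table.getRow(row);

    // New in this commit: the row as it looked an hour ago.
    long anHourAgo = System.currentTimeMillis() - 60L * 60 * 1000;
    SortedMap<Text, byte[]> older = table.getRow(row, anHourAgo);

    System.out.println("columns now: " + latest.size() + ", an hour ago: " + older.size());
  }
}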