HBASE-841 Consolidate multiple overloaded methods in HRegionInterface, HRegionServer (Jean-Daniel Cryans via Jim Kellerman)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@689162 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-08-26 18:20:25 +00:00
parent 90685d499c
commit f277d7d3b0
12 changed files with 46 additions and 110 deletions

View File

@@ -48,6 +48,8 @@ Release 0.18.0 - Unreleased
(Sishen Freecity via Stack)
HBASE-830 Debugging HCM.locateRegionInMeta is painful
HBASE-784 Base hbase-0.3.0 on hadoop-0.18
HBASE-841 Consolidate multiple overloaded methods in HRegionInterface,
HRegionServer (Jean-Daniel Cryans via Jim Kellerman)
NEW FEATURES
HBASE-787 Postgresql to HBase table replication example (Tim Sell via Stack)
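For orientation, the change replaces the removed convenience overloads with a single get that takes an explicit timestamp and version count, where -1 means "use the default". A minimal sketch of the resulting client-side call shape (illustrative only, not code from this commit; server, regionName, row and column are assumed to be an HRegionInterface proxy and byte [] keys, as in the HTable hunk below):

    // Old overload (removed): Cell value = server.get(regionName, row, column);
    // Consolidated call: -1 timestamp = latest, -1 numVersions = one version.
    Cell[] cells = server.get(regionName, row, column, -1, -1);
    Cell value = (cells == null) ? null : cells[0];  // null when nothing matches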

View File

@@ -386,8 +386,9 @@ public class HTable {
return connection.getRegionServerWithRetries(
new ServerCallable<Cell>(connection, tableName, row) {
public Cell call() throws IOException {
return server.get(location.getRegionInfo().getRegionName(), row,
column);
Cell[] result = server.get(location.getRegionInfo().getRegionName(),
row, column, -1, -1);
return (result == null)? null : result[0];
}
}
);
@@ -408,7 +409,7 @@
new ServerCallable<Cell[]>(connection, tableName, row) {
public Cell[] call() throws IOException {
return server.get(location.getRegionInfo().getRegionName(), row,
column, numVersions);
column, -1, numVersions);
}
}
);

View File

@@ -49,33 +49,6 @@ public interface HRegionInterface extends VersionedProtocol {
*/
public HRegionInfo getRegionInfo(final byte [] regionName)
throws NotServingRegionException;
/**
* Retrieve a single value from the specified region for the specified row
* and column keys
*
* @param regionName name of region
* @param row row key
* @param column column key
* @return value for that region/row/column
* @throws IOException
*/
public Cell get(final byte [] regionName, final byte [] row, final byte [] column)
throws IOException;
/**
* Get the specified number of versions of the specified row and column
*
* @param regionName region name
* @param row row key
* @param column column key
* @param numVersions number of versions to return
* @return array of values
* @throws IOException
*/
public Cell[] get(final byte [] regionName, final byte [] row,
final byte [] column, final int numVersions)
throws IOException;
/**
* Get the specified number of versions of the specified row and column with

View File

@@ -1118,31 +1118,6 @@ public class HRegion implements HConstants {
// get() methods for client use.
//////////////////////////////////////////////////////////////////////////////
/**
* Fetch a single data item.
* @param row
* @param column
* @return column value
* @throws IOException
*/
public Cell get(byte [] row, byte [] column) throws IOException {
Cell[] results = get(row, column, Long.MAX_VALUE, 1);
return (results == null || results.length == 0)? null: results[0];
}
/**
* Fetch multiple versions of a single data item
*
* @param row
* @param column
* @param numVersions
* @return array of values one element per version
* @throws IOException
*/
public Cell[] get(byte [] row, byte [] column, int numVersions)
throws IOException {
return get(row, column, Long.MAX_VALUE, numVersions);
}
/**
* Fetch multiple versions of a single data item, with timestamp.
*
@@ -1150,12 +1125,20 @@ public class HRegion implements HConstants {
* @param column
* @param timestamp
* @param numVersions
* @return array of values one element per version that matches the timestamp
* @return array of values one element per version that matches the timestamp,
* or null if there are no matches.
* @throws IOException
*/
public Cell[] get(byte [] row, byte [] column, long timestamp,
int numVersions)
throws IOException {
if (timestamp == -1) {
timestamp = Long.MAX_VALUE;
}
if (numVersions == -1) {
numVersions = 1;
}
splitsAndClosesLock.readLock().lock();
try {
if (this.closed.get()) {
@@ -1166,7 +1149,10 @@ public class HRegion implements HConstants {
checkColumn(column);
// Don't need a row lock for a simple get
HStoreKey key = new HStoreKey(row, column, timestamp);
return getStore(column).get(key, numVersions);
Cell[] result = getStore(column).get(key, numVersions);
// Guarantee that we return null instead of a zero-length array,
// if there are no results to return.
return (result == null || result.length == 0)? null : result;
} finally {
splitsAndClosesLock.readLock().unlock();
}
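The hunk above is where the -1 sentinels get their meaning: a timestamp of -1 is mapped to Long.MAX_VALUE, a numVersions of -1 is mapped to 1, and a zero-length result is normalized to null. A hedged sketch of equivalent calls, assuming an HRegion named region and byte [] row and column in scope (hypothetical caller, not part of this commit):

    // Replaces the removed region.get(row, column): latest single version.
    Cell[] latest = region.get(row, column, -1, -1);
    Cell one = (latest == null) ? null : latest[0];
    // Replaces the removed region.get(row, column, 3): up to three versions, any timestamp.
    Cell[] upToThree = region.get(row, column, -1, 3);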

View File

@@ -1007,34 +1007,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
return getRegion(regionName).getRegionInfo();
}
/** {@inheritDoc} */
public Cell get(final byte [] regionName, final byte [] row,
final byte [] column)
throws IOException {
checkOpen();
requestCount.incrementAndGet();
try {
return getRegion(regionName).get(row, column);
} catch (IOException e) {
checkFileSystem();
throw e;
}
}
/** {@inheritDoc} */
public Cell[] get(final byte [] regionName, final byte [] row,
final byte [] column, final int numVersions)
throws IOException {
checkOpen();
requestCount.incrementAndGet();
try {
return getRegion(regionName).get(row, column, numVersions);
} catch (IOException e) {
checkFileSystem();
throw e;
}
}
/** {@inheritDoc} */
public Cell[] get(final byte [] regionName, final byte [] row,
final byte [] column, final long timestamp, final int numVersions)
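After these removals, the only get overload left on the server side appears to be the five-argument form whose opening lines are visible immediately above; its throws clause is cut off in this view but presumably still declares IOException, matching the overloads it replaces. A reconstructed sketch of the surviving HRegionInterface declaration (an assumption drawn from the visible lines, not quoted from the patch):

    public Cell[] get(final byte [] regionName, final byte [] row,
        final byte [] column, final long timestamp, final int numVersions)
      throws IOException;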

View File

@@ -33,6 +33,7 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.HLog;
@@ -140,10 +141,10 @@ public class Merge extends Configured implements Tool {
*/
private void mergeTwoMetaRegions() throws IOException {
HRegion rootRegion = utils.getRootRegion();
HRegionInfo info1 = Writables.getHRegionInfo(
rootRegion.get(region1, HConstants.COL_REGIONINFO));
HRegionInfo info2 = Writables.getHRegionInfo(
rootRegion.get(region2, HConstants.COL_REGIONINFO));
Cell[] cells1 = rootRegion.get(region1, HConstants.COL_REGIONINFO, -1, -1);
HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? null : cells1[0]);
Cell[] cells2 = rootRegion.get(region2, HConstants.COL_REGIONINFO, -1, -1);
HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null : cells2[0]);
HRegion merged = merge(info1, rootRegion, info2, rootRegion);
LOG.info("Adding " + merged.getRegionInfo() + " to " +
rootRegion.getRegionInfo());
@@ -205,8 +206,8 @@
LOG.info("Found meta for region1 " + meta1.getRegionName() +
", meta for region2 " + meta2.getRegionName());
HRegion metaRegion1 = this.utils.getMetaRegion(meta1);
HRegionInfo info1 = Writables.getHRegionInfo(
metaRegion1.get(region1, HConstants.COL_REGIONINFO));
Cell[] cells1 = metaRegion1.get(region1, HConstants.COL_REGIONINFO, -1, -1);
HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? null : cells1[0]);
if (info1== null) {
throw new NullPointerException("info1 is null using key " + region1 +
" in " + meta1);
@@ -218,8 +219,8 @@
} else {
metaRegion2 = utils.getMetaRegion(meta2);
}
HRegionInfo info2 = Writables.getHRegionInfo(
metaRegion2.get(region2, HConstants.COL_REGIONINFO));
Cell[] cells2 = metaRegion2.get(region2, HConstants.COL_REGIONINFO, -1, -1);
HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null : cells2[0]);
if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta2);
}

View File

@@ -400,7 +400,7 @@ public class MetaUtils {
throws IOException {
if (LOG.isDebugEnabled()) {
HRegionInfo h = Writables.getHRegionInfoOrNull(
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO, -1, -1)[0].getValue());
LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
" for " + hri.toString() + " in " + r.toString() + " is: " +
h.toString());
@@ -410,7 +410,7 @@
r.batchUpdate(b, null);
if (LOG.isDebugEnabled()) {
HRegionInfo h = Writables.getHRegionInfoOrNull(
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
r.get(hri.getRegionName(), HConstants.COL_REGIONINFO, -1, -1)[0].getValue());
LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
" for " + hri.toString() + " in " + r.toString() + " is: " +
h.toString());

View File

@@ -422,13 +422,14 @@ public abstract class HBaseTestCase extends TestCase {
/** {@inheritDoc} */
public Cell get(byte [] row, byte [] column) throws IOException {
return this.region.get(row, column);
Cell[] result = this.region.get(row, column, -1, -1);
return (result == null)? null : result[0];
}
/** {@inheritDoc} */
public Cell[] get(byte [] row, byte [] column, int versions)
throws IOException {
return this.region.get(row, column, versions);
return this.region.get(row, column, -1, versions);
}
/** {@inheritDoc} */

View File

@@ -100,7 +100,7 @@ public class TestCompaction extends HBaseTestCase {
// available.
addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
Cell[] cellValues =
r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/);
// Assert that I can get 3 versions since it is the max I should get
assertTrue(cellValues.length == 3);
r.flushcache();
@@ -114,7 +114,7 @@
byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
// Increment the least significant character so we get to next row.
secondRowBytes[START_KEY_BYTES.length - 1]++;
cellValues = r.get(secondRowBytes, COLUMN_FAMILY_TEXT, 100/*Too many*/);
cellValues = r.get(secondRowBytes, COLUMN_FAMILY_TEXT, -1, 100/*Too many*/);
LOG.info("Count of " + Bytes.toString(secondRowBytes) + ": " + cellValues.length);
// Commented out because fails on an hp+ubuntu single-processor w/ 1G and
// "Intel(R) Pentium(R) 4 CPU 3.20GHz" though passes on all local
@@ -131,10 +131,10 @@
// Now, before compacting, remove all instances of the first row so can
// verify that it is removed as we compact.
// Assert all deleted.
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
r.flushcache();
assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 2);
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/));
// Add a bit of data and flush it so we for sure have the compaction limit
// for store files. Usually by this time we will have but if compaction
// included the flush that ran 'concurrently', there may be just the
@@ -146,7 +146,7 @@
r.compactStores();
assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 2);
// Assert that the first row is still deleted.
cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, -1, 100 /*Too many*/);
assertNull(cellValues);
// Make sure the store files do have some 'aaa' keys in them.
boolean containsStartRow = false;

View File

@@ -82,11 +82,11 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
// Now delete all then retry
region.deleteAll(Bytes.toBytes(T00), System.currentTimeMillis(), null);
Cell [] cells = region.get(Bytes.toBytes(T00), column,
Cell [] cells = region.get(Bytes.toBytes(T00), column, -1,
HColumnDescriptor.DEFAULT_VERSIONS);
assertTrue(cells == null);
region.flushcache();
cells = region.get(Bytes.toBytes(T00), column,
cells = region.get(Bytes.toBytes(T00), column, -1,
HColumnDescriptor.DEFAULT_VERSIONS);
assertTrue(cells == null);
@@ -123,11 +123,11 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
final byte [] column)
throws IOException {
byte [] r = Bytes.toBytes(row);
Cell [] cells = region.get(r, column, 100);
Cell [] cells = region.get(r, column, -1, 100);
assertTrue(cells.length == HColumnDescriptor.DEFAULT_VERSIONS);
cells = region.get(r, column, 1);
cells = region.get(r, column, -1, 1);
assertTrue(cells.length == 1);
cells = region.get(r, column, HConstants.ALL_VERSIONS);
cells = region.get(r, column, -1, HConstants.ALL_VERSIONS);
assertTrue(cells.length == HColumnDescriptor.DEFAULT_VERSIONS);
}

View File

@@ -215,7 +215,7 @@ public class TestSplit extends HBaseClusterTestCase {
private void assertGet(final HRegion r, final byte [] family, final byte [] k)
throws IOException {
// Now I have k, get values out and assert they are as expected.
Cell[] results = r.get(k, family, Integer.MAX_VALUE);
Cell[] results = r.get(k, family, -1, Integer.MAX_VALUE);
for (int j = 0; j < results.length; j++) {
byte [] tmp = results[j].getValue();
// Row should be equal to value every time.

View File

@@ -179,7 +179,7 @@ public class TestMergeTool extends HBaseTestCase {
throws IOException {
for (int i = 0; i < upperbound; i++) {
for (int j = 0; j < rows[i].length; j++) {
byte[] bytes = merged.get(rows[i][j], COLUMN_NAME).getValue();
byte[] bytes = merged.get(rows[i][j], COLUMN_NAME, -1, -1)[0].getValue();
assertNotNull(rows[i][j].toString(), bytes);
assertTrue(Bytes.equals(bytes, rows[i][j]));
}
@@ -195,7 +195,7 @@
// contain the right data.
for (int i = 0; i < regions.length; i++) {
for (int j = 0; j < rows[i].length; j++) {
byte[] bytes = regions[i].get(rows[i][j], COLUMN_NAME).getValue();
byte[] bytes = regions[i].get(rows[i][j], COLUMN_NAME, -1, -1)[0].getValue();
assertNotNull(bytes);
assertTrue(Bytes.equals(bytes, rows[i][j]));
}