HBASE-544 Purge startUpdate from all internal code and test cases
Removes startUpdate calls from all but a few places. TestBatchUpdate and TestMultipleUpdates both stay the same, but TMU will be removed when startUpdate is. Parts of TBU will also be whacked when we remove the deprecated methods. HTable still has its startUpdate methods. Changed the Incommon interface to remove the startUpdate, put, delete, and commit methods, and made a new commit(BatchUpdate).

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@644811 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 589a407f0c
commit 0c3d5abbc8
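The change applied throughout the diff below has the same shape in every file: the deprecated lock-based write sequence is replaced by a BatchUpdate that is assembled client-side and committed in a single call. For illustration only — the row and column names here are invented, not taken from the commit — the before/after pattern in Java looks like this:

    // Old, deprecated API: obtain a row lock, issue edits against it, commit.
    long lockid = table.startUpdate(new Text("somerow"));             // hypothetical row
    table.put(lockid, new Text("family:qualifier"), "v".getBytes());  // hypothetical column
    table.commit(lockid);  // table.abort(lockid) releases the lock on failure

    // New API: collect edits in a BatchUpdate, then commit it once.
    BatchUpdate batchUpdate = new BatchUpdate(new Text("somerow"));
    batchUpdate.put(new Text("family:qualifier"), "v".getBytes());
    table.commit(batchUpdate);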
@@ -17,6 +17,7 @@ Hbase Change Log
 
   IMPROVEMENTS
    HBASE-469 Streamline HStore startup and compactions
+   HBASE-544 Purge startUpdate from internal code and test cases
 
 Release 0.1.0
 
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Deletes values from tables.
@@ -55,11 +56,11 @@ public class DeleteCommand extends BasicCommand {
       HTable hTable = new HTable(conf, tableName);
 
       if (rowKey != null) {
-        long lockID = hTable.startUpdate(rowKey);
+        BatchUpdate bu = new BatchUpdate(rowKey);
         for (Text column : getColumnList(admin, hTable)) {
-          hTable.delete(lockID, new Text(column));
+          bu.delete(new Text(column));
         }
-        hTable.commit(lockID);
+        hTable.commit(bu);
       } else {
         admin.disableTable(tableName);
         for (Text column : getColumnList(admin, hTable)) {

@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Inserts values into tables.
@@ -59,7 +60,9 @@ public class InsertCommand extends BasicCommand {
 
     try {
       HTable table = new HTable(conf, tableName);
-      long lockId = table.startUpdate(getRow());
+      BatchUpdate batchUpdate = timestamp == null ?
+        new BatchUpdate(getRow())
+        : new BatchUpdate(getRow(), Long.parseLong(timestamp));
 
       for (int i = 0; i < values.size(); i++) {
         Text column = null;
@@ -67,13 +70,10 @@ public class InsertCommand extends BasicCommand {
           column = getColumn(i);
         else
           column = new Text(getColumn(i) + ":");
-        table.put(lockId, column, getValue(i));
+        batchUpdate.put(column, getValue(i));
       }
 
-      if(timestamp != null)
-        table.commit(lockId, Long.parseLong(timestamp));
-      else
-        table.commit(lockId);
+      table.commit(batchUpdate);
 
       return new ReturnMsg(1, "1 row inserted successfully.");
     } catch (IOException e) {

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 import org.mortbay.servlet.MultiPartResponse;
 import org.w3c.dom.Document;
@@ -296,12 +297,13 @@ public class TableHandler extends GenericHandler {
       throw new ServletException(e);
     }
 
-    long lock_id = -1;
+    BatchUpdate batchUpdate;
 
     try{
       // start an update
       Text key = new Text(row);
-      lock_id = table.startUpdate(key);
+      batchUpdate = timestamp == null ?
+        new BatchUpdate(key) : new BatchUpdate(key, Long.parseLong(timestamp));
 
       // set the columns from the xml
       NodeList columns = doc.getElementsByTagName("column");
@@ -328,24 +330,16 @@ public class TableHandler extends GenericHandler {
         }
 
         // put the value
-        table.put(lock_id, name, value);
+        batchUpdate.put(name, value);
       }
 
       // commit the update
-      if (timestamp != null) {
-        table.commit(lock_id, Long.parseLong(timestamp));
-      }
-      else{
-        table.commit(lock_id);
-      }
+      table.commit(batchUpdate);
 
       // respond with a 200
       response.setStatus(200);
     }
     catch(Exception e){
-      if (lock_id != -1) {
-        table.abort(lock_id);
-      }
       throw new ServletException(e);
     }
   }

@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 import com.facebook.thrift.TException;
 import com.facebook.thrift.protocol.TBinaryProtocol;
@@ -296,9 +297,9 @@ public class ThriftServer {
     }
     try {
       HTable table = getTable(tableName);
-      long lockid = table.startUpdate(getText(row));
-      table.put(lockid, getText(column), value);
-      table.commit(lockid);
+      BatchUpdate batchUpdate = new BatchUpdate(getText(row));
+      batchUpdate.put(getText(column), value);
+      table.commit(batchUpdate);
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     } catch (IllegalArgumentException e) {
@@ -412,15 +413,15 @@
 
     try {
       table = getTable(tableName);
-      lockid = table.startUpdate(getText(row));
+      BatchUpdate batchUpdate = new BatchUpdate(getText(row), timestamp);
       for (Mutation m : mutations) {
         if (m.isDelete) {
-          table.delete(lockid, getText(m.column));
+          batchUpdate.delete(getText(m.column));
         } else {
-          table.put(lockid, getText(m.column), m.value);
+          batchUpdate.put(getText(m.column), m.value);
         }
       }
-      table.commit(lockid, timestamp);
+      table.commit(batchUpdate);
     } catch (IOException e) {
       if (lockid != null) {
         table.abort(lockid);

@@ -25,6 +25,7 @@ import java.util.Random;
 
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -126,11 +127,11 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
 
     HRegionIncommon r = new HRegionIncommon(region);
     for(int i = firstRow; i < firstRow + nrows; i++) {
-      long lockid = r.startUpdate(new Text("row_"
+      BatchUpdate batchUpdate = new BatchUpdate(new Text("row_"
           + String.format("%1$05d", i)));
 
-      r.put(lockid, COLUMN_NAME, value.get());
-      r.commit(lockid, System.currentTimeMillis());
+      batchUpdate.put(COLUMN_NAME, value.get());
+      region.batchUpdate(batchUpdate);
       if(i % 10000 == 0) {
         System.out.println("Flushing write #" + i);
         r.flushcache();

@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Additional scanner tests.
@@ -106,9 +107,9 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
         // flipping the switch in StopRowFilter stopping us returning all
         // of the rest of the other store content.
         if (i == 0) {
-          long id = inc.startUpdate(new Text("bbb"));
-          inc.put(id, families[0], "bbb".getBytes());
-          inc.commit(id);
+          BatchUpdate batchUpdate = new BatchUpdate(new Text("bbb"));
+          batchUpdate.put(families[0], "bbb".getBytes());
+          inc.commit(batchUpdate);
         }
       }
       RowFilterInterface f =
@@ -173,12 +174,12 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
     HTable table = new HTable(this.conf, tableName);
     // Add a row to columns without qualifiers and then two with. Make one
     // numbers only so easy to find w/ a regex.
-    long id = table.startUpdate(new Text(getName()));
+    BatchUpdate batchUpdate = new BatchUpdate(new Text(getName()));
     final String firstColkeyFamily = Character.toString(FIRST_COLKEY) + ":";
-    table.put(id, new Text(firstColkeyFamily + getName()), GOOD_BYTES);
-    table.put(id, new Text(firstColkeyFamily + "22222"), GOOD_BYTES);
-    table.put(id, new Text(firstColkeyFamily), GOOD_BYTES);
-    table.commit(id);
+    batchUpdate.put(new Text(firstColkeyFamily + getName()), GOOD_BYTES);
+    batchUpdate.put(new Text(firstColkeyFamily + "22222"), GOOD_BYTES);
+    batchUpdate.put(new Text(firstColkeyFamily), GOOD_BYTES);
+    table.commit(batchUpdate);
     // Now do a scan using a regex for a column name.
     checkRegexingScanner(table, firstColkeyFamily + "\\d+");
     // Do a new scan that only matches on column family.
@@ -230,12 +231,12 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
     HTable table = new HTable(conf, tableName);
     for (char i = FIRST_ROWKEY; i <= LAST_ROWKEY; i++) {
       Text rowKey = new Text(new String(new char[] { i }));
-      long lockID = table.startUpdate(rowKey);
+      BatchUpdate batchUpdate = new BatchUpdate(rowKey);
       for (char j = 0; j < colKeys.length; j++) {
-        table.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
+        batchUpdate.put(colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
           i <= LAST_BAD_RANGE_ROWKEY)? BAD_BYTES : GOOD_BYTES);
       }
-      table.commit(lockID);
+      table.commit(batchUpdate);
     }
 
     regExpFilterTest(table, colKeys);
@@ -412,13 +413,13 @@
       final HServerAddress serverAddress,
       final long startCode)
   throws IOException {
-    long lockid = t.startUpdate(region.getRegionName());
-    t.put(lockid, HConstants.COL_REGIONINFO,
+    BatchUpdate batchUpdate = new BatchUpdate(region.getRegionName());
+    batchUpdate.put(HConstants.COL_REGIONINFO,
       Writables.getBytes(region.getRegionInfo()));
-    t.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(serverAddress.toString()));
-    t.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
-    t.commit(lockid);
+    batchUpdate.put(HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
+    t.commit(batchUpdate);
     // Assert added.
     byte [] bytes =
       t.get(region.getRegionName(), HConstants.COL_REGIONINFO).getValue();
@@ -439,11 +440,11 @@
    */
   private void removeRegionFromMETA(final HTable t, final Text regionName)
   throws IOException {
-    long lockid = t.startUpdate(regionName);
-    t.delete(lockid, HConstants.COL_REGIONINFO);
-    t.delete(lockid, HConstants.COL_SERVER);
-    t.delete(lockid, HConstants.COL_STARTCODE);
-    t.commit(lockid);
+    BatchUpdate batchUpdate = new BatchUpdate(regionName);
+    batchUpdate.delete(HConstants.COL_REGIONINFO);
+    batchUpdate.delete(HConstants.COL_SERVER);
+    batchUpdate.delete(HConstants.COL_STARTCODE);
+    t.commit(batchUpdate);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removed " + regionName + " from table " + t.getTableName());
     }

@@ -270,32 +270,21 @@ public abstract class HBaseTestCase extends TestCase {
             break EXIT;
           }
           try {
-            long lockid = updater.startUpdate(t);
+            BatchUpdate batchUpdate = ts == -1 ?
+              new BatchUpdate(t) : new BatchUpdate(t, ts);
             try {
-              updater.put(lockid, new Text(column), bytes);
-              if (ts == -1) {
-                updater.commit(lockid);
-              } else {
-                updater.commit(lockid, ts);
-              }
-              lockid = -1;
+              batchUpdate.put(new Text(column), bytes);
+              updater.commit(batchUpdate);
            } catch (RuntimeException ex) {
              ex.printStackTrace();
              throw ex;
 
            } catch (IOException ex) {
              ex.printStackTrace();
              throw ex;
 
-            } finally {
-              if (lockid != -1) {
-                updater.abort(lockid);
-              }
            }
          } catch (RuntimeException ex) {
            ex.printStackTrace();
            throw ex;
 
          } catch (IOException ex) {
            ex.printStackTrace();
            throw ex;
@@ -325,12 +314,6 @@ public abstract class HBaseTestCase extends TestCase {
    * TOOD: Come up w/ a better name for this interface.
    */
   public static interface Incommon {
-    /**
-     * @param row
-     * @return update id
-     * @throws IOException
-     */
-    public long startUpdate(Text row) throws IOException;
     /**
      * @param row
      * @param column
@@ -356,19 +339,6 @@ public abstract class HBaseTestCase extends TestCase {
      */
     public Cell[] get(Text row, Text column, long ts, int versions)
     throws IOException;
-    /**
-     * @param lockid
-     * @param column
-     * @param val
-     * @throws IOException
-     */
-    public void put(long lockid, Text column, byte val[]) throws IOException;
-    /**
-     * @param lockid
-     * @param column
-     * @throws IOException
-     */
-    public void delete(long lockid, Text column) throws IOException;
     /**
      * @param row
      * @param column
@@ -376,22 +346,13 @@ public abstract class HBaseTestCase extends TestCase {
      * @throws IOException
      */
     public void deleteAll(Text row, Text column, long ts) throws IOException;
 
     /**
-     * @param lockid
+     * @param batchUpdate
      * @throws IOException
      */
-    public void commit(long lockid) throws IOException;
-    /**
-     * @param lockid
-     * @param ts
-     * @throws IOException
-     */
-    public void commit(long lockid, long ts) throws IOException;
-    /**
-     * @param lockid
-     * @throws IOException
-     */
-    public void abort(long lockid) throws IOException;
+    public void commit(BatchUpdate batchUpdate) throws IOException;
 
     /**
      * @param columns
      * @param firstRow
@@ -423,70 +384,39 @@ public abstract class HBaseTestCase extends TestCase {
       this.region = HRegion;
       this.batch = null;
     }
-    /** {@inheritDoc} */
-    public void abort(@SuppressWarnings("unused") long lockid) {
-      this.batch = null;
-    }
-    /** {@inheritDoc} */
-    public void commit(long lockid) throws IOException {
-      commit(lockid, HConstants.LATEST_TIMESTAMP);
-    }
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid, final long ts)
-    throws IOException {
-      checkBatch();
-      try {
-        this.batch.setTimestamp(ts);
-        this.region.batchUpdate(batch);
-      } finally {
-        this.batch = null;
-      }
-    }
-    /** {@inheritDoc} */
-    public void put(@SuppressWarnings("unused") long lockid, Text column,
-        byte[] val) {
-      checkBatch();
-      this.batch.put(column, val);
-    }
-    /** {@inheritDoc} */
-    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
-      checkBatch();
-      this.batch.delete(column);
-    }
+    public void commit(BatchUpdate batchUpdate) throws IOException {
+      region.batchUpdate(batchUpdate);
+    };
 
     /** {@inheritDoc} */
     public void deleteAll(Text row, Text column, long ts) throws IOException {
       this.region.deleteAll(row, column, ts);
     }
 
-    /**
-     * @param row
-     * @return update id
-     */
-    public long startUpdate(Text row) {
-      if (this.batch != null) {
-        throw new IllegalStateException("Update already in progress");
-      }
-      this.batch = new BatchUpdate(row);
-      return 1;
-    }
     /** {@inheritDoc} */
     public HScannerInterface getScanner(Text [] columns, Text firstRow,
         long ts) throws IOException {
       return this.region.getScanner(columns, firstRow, ts, null);
     }
 
     /** {@inheritDoc} */
     public Cell get(Text row, Text column) throws IOException {
       return this.region.get(row, column);
     }
 
     /** {@inheritDoc} */
     public Cell[] get(Text row, Text column, int versions) throws IOException {
       return this.region.get(row, column, versions);
     }
 
     /** {@inheritDoc} */
     public Cell[] get(Text row, Text column, long ts, int versions)
     throws IOException {
       return this.region.get(row, column, ts, versions);
     }
 
     /**
      * @param row
      * @return values for each column in the specified row
@@ -495,6 +425,7 @@ public abstract class HBaseTestCase extends TestCase {
     public Map<Text, Cell> getFull(Text row) throws IOException {
       return region.getFull(row, null, HConstants.LATEST_TIMESTAMP);
     }
 
+    /** {@inheritDoc} */
     public void flushcache() throws IOException {
       this.region.flushcache();
@@ -522,56 +453,17 @@ public abstract class HBaseTestCase extends TestCase {
       this.table = table;
       this.batch = null;
     }
-    /** {@inheritDoc} */
-    public void abort(@SuppressWarnings("unused") long lockid) {
-      if (this.batch != null) {
-        this.batch = null;
-      }
-    }
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid)
-    throws IOException {
-      checkBatch();
-      this.table.commit(batch);
-      this.batch = null;
-    }
-
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid, final long ts)
-    throws IOException {
-      checkBatch();
-      this.batch.setTimestamp(ts);
-      this.table.commit(batch);
-      this.batch = null;
-    }
-
-    /** {@inheritDoc} */
-    public void put(@SuppressWarnings("unused") long lockid, Text column,
-        byte[] val) {
-      checkBatch();
-      this.batch.put(column, val);
-    }
-
-    /** {@inheritDoc} */
-    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
-      checkBatch();
-      this.batch.delete(column);
-    }
+    public void commit(BatchUpdate batchUpdate) throws IOException {
+      table.commit(batchUpdate);
+    };
 
     /** {@inheritDoc} */
     public void deleteAll(Text row, Text column, long ts) throws IOException {
       this.table.deleteAll(row, column, ts);
     }
 
-    /** {@inheritDoc} */
-    public long startUpdate(Text row) {
-      if (this.batch != null) {
-        throw new IllegalStateException("Batch update already in progress.");
-      }
-      this.batch = new BatchUpdate(row);
-      return 0L;
-    }
-
     /** {@inheritDoc} */
     public HScannerInterface getScanner(Text [] columns, Text firstRow,
         long ts) throws IOException {

@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.HBaseTestCase;
 
 import org.apache.commons.logging.Log;
@@ -228,13 +229,10 @@ public class TimestampTestBase extends HBaseTestCase {
   public static void put(final Incommon loader, final byte [] bytes,
     final long ts)
   throws IOException {
-    long lockid = loader.startUpdate(ROW);
-    loader.put(lockid, COLUMN, bytes);
-    if (ts == HConstants.LATEST_TIMESTAMP) {
-      loader.commit(lockid);
-    } else {
-      loader.commit(lockid, ts);
-    }
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ?
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.put(COLUMN, bytes);
+    loader.commit(batchUpdate);
   }
 
   public static void delete(final Incommon loader) throws IOException {
@@ -242,12 +240,9 @@ public class TimestampTestBase extends HBaseTestCase {
   }
 
   public static void delete(final Incommon loader, final long ts) throws IOException {
-    long lockid = loader.startUpdate(ROW);
-    loader.delete(lockid, COLUMN);
-    if (ts == HConstants.LATEST_TIMESTAMP) {
-      loader.commit(lockid);
-    } else {
-      loader.commit(lockid, ts);
-    }
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ?
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.delete(COLUMN);
+    loader.commit(batchUpdate);
   }
 }

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Tests HTable
@@ -87,9 +88,9 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
     HTableDescriptor meta = a.getMetadata();
     assertTrue(meta.equals(tableAdesc));
 
-    long lockid = a.startUpdate(row);
-    a.put(lockid, COLUMN_FAMILY, value);
-    a.commit(lockid);
+    BatchUpdate batchUpdate = new BatchUpdate(row);
+    batchUpdate.put(COLUMN_FAMILY, value);
+    a.commit(batchUpdate);
 
     // open a new connection to A and a connection to b
 
@@ -105,12 +106,11 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
       HStoreKey key = new HStoreKey();
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
       while(s.next(key, results)) {
-        lockid = b.startUpdate(key.getRow());
+        batchUpdate = new BatchUpdate(key.getRow());
         for(Map.Entry<Text, byte[]> e: results.entrySet()) {
-          b.put(lockid, e.getKey(), e.getValue());
+          batchUpdate.put(e.getKey(), e.getValue());
         }
-        b.commit(lockid);
-        b.abort(lockid);
+        b.commit(batchUpdate);
       }
     } finally {
       s.close();

@@ -25,8 +25,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 
 /**
- * Tests that HClient protects against multiple updates
+ * Tests that HClient protects against multiple updates. This test case will
+ * be removed at the same time that HTable.startuUpdate and friends are
+ * removed.
  */
+@Deprecated
 public class TestMultipleUpdates extends HBaseClusterTestCase {
   private static final String CONTENTS_STR = "contents:";
   private static final Text CONTENTS = new Text(CONTENTS_STR);

@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test the functionality of deleteAll.
@@ -95,26 +96,26 @@ public class TestDeleteAll extends HBaseTestCase {
     Text colC = new Text(COLUMNS[0].toString() + "c");
     Text colD = new Text(COLUMNS[0].toString());
 
-    long lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(0, flush).getBytes());
-    region_incommon.commit(lock, t0);
+    BatchUpdate batchUpdate = new BatchUpdate(row, t0);
+    batchUpdate.put(colA, cellData(0, flush).getBytes());
+    batchUpdate.put(colB, cellData(0, flush).getBytes());
+    batchUpdate.put(colC, cellData(0, flush).getBytes());
+    batchUpdate.put(colD, cellData(0, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(1, flush).getBytes());
-    region_incommon.commit(lock, t1);
+    batchUpdate = new BatchUpdate(row, t1);
+    batchUpdate.put(colA, cellData(1, flush).getBytes());
+    batchUpdate.put(colB, cellData(1, flush).getBytes());
+    batchUpdate.put(colC, cellData(1, flush).getBytes());
+    batchUpdate.put(colD, cellData(1, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(2, flush).getBytes());
-    region_incommon.commit(lock, t2);
+    batchUpdate = new BatchUpdate(row, t2);
+    batchUpdate.put(colA, cellData(2, flush).getBytes());
+    batchUpdate.put(colB, cellData(2, flush).getBytes());
+    batchUpdate.put(colC, cellData(2, flush).getBytes());
+    batchUpdate.put(colD, cellData(2, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
     if (flush) {region_incommon.flushcache();}
 
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test the functionality of deleteFamily.
@@ -91,23 +92,24 @@ public class TestDeleteFamily extends HBaseTestCase {
     Text colB = new Text(COLUMNS[0].toString() + "b");
     Text colC = new Text(COLUMNS[1].toString() + "c");
 
-    long lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(0, flush).getBytes());
-    region_incommon.commit(lock, t0);
+    BatchUpdate batchUpdate = null;
+    batchUpdate = new BatchUpdate(row, t0);
+    batchUpdate.put(colA, cellData(0, flush).getBytes());
+    batchUpdate.put(colB, cellData(0, flush).getBytes());
+    batchUpdate.put(colC, cellData(0, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(1, flush).getBytes());
-    region_incommon.commit(lock, t1);
+    batchUpdate = new BatchUpdate(row, t1);
+    batchUpdate.put(colA, cellData(1, flush).getBytes());
+    batchUpdate.put(colB, cellData(1, flush).getBytes());
+    batchUpdate.put(colC, cellData(1, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(2, flush).getBytes());
-    region_incommon.commit(lock, t2);
+    batchUpdate = new BatchUpdate(row, t2);
+    batchUpdate.put(colA, cellData(2, flush).getBytes());
+    batchUpdate.put(colB, cellData(2, flush).getBytes());
+    batchUpdate.put(colC, cellData(2, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
     if (flush) {region_incommon.flushcache();}
 
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /** Test case for get */
 public class TestGet extends HBaseTestCase {
@@ -100,19 +101,20 @@ public class TestGet extends HBaseTestCase {
 
       // Write information to the table
 
-      long lockid = r.startUpdate(ROW_KEY);
-      r.put(lockid, CONTENTS, Writables.getBytes(CONTENTS));
-      r.put(lockid, HConstants.COL_REGIONINFO,
+      BatchUpdate batchUpdate = null;
+      batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+      batchUpdate.put(CONTENTS, Writables.getBytes(CONTENTS));
+      batchUpdate.put(HConstants.COL_REGIONINFO,
         Writables.getBytes(HRegionInfo.rootRegionInfo));
-      r.commit(lockid, System.currentTimeMillis());
+      r.commit(batchUpdate);
 
-      lockid = r.startUpdate(ROW_KEY);
-      r.put(lockid, HConstants.COL_SERVER,
+      batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+      batchUpdate.put(HConstants.COL_SERVER,
         Writables.stringToBytes(new HServerAddress(SERVER_ADDRESS).toString()));
-      r.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(lockid));
-      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+      batchUpdate.put(HConstants.COL_STARTCODE, Writables.longToBytes(12345));
+      batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "region"),
         "region".getBytes(HConstants.UTF8_ENCODING));
-      r.commit(lockid, System.currentTimeMillis());
+      r.commit(batchUpdate);
 
       // Verify that get works the same from memcache as when reading from disk
       // NOTE dumpRegion won't work here because it only reads from disk.
@@ -131,15 +133,15 @@
 
       // Update one family member and add a new one
 
-      lockid = r.startUpdate(ROW_KEY);
-      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+      batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+      batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "region"),
        "region2".getBytes(HConstants.UTF8_ENCODING));
       String otherServerName = "bar.foo.com:4321";
-      r.put(lockid, HConstants.COL_SERVER,
+      batchUpdate.put(HConstants.COL_SERVER,
        Writables.stringToBytes(new HServerAddress(otherServerName).toString()));
-      r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
+      batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "junk"),
        "junk".getBytes(HConstants.UTF8_ENCODING));
-      r.commit(lockid, System.currentTimeMillis());
+      r.commit(batchUpdate);
 
       verifyGet(r, otherServerName);
 
@@ -122,13 +122,13 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
       long one_second_ago = right_now - 1000;
 
       Text t = new Text("test_row");
-      long lockid = region_incommon.startUpdate(t);
-      region_incommon.put(lockid, COLUMNS[0], "old text".getBytes());
-      region_incommon.commit(lockid, one_second_ago);
+      BatchUpdate batchUpdate = new BatchUpdate(t, one_second_ago);
+      batchUpdate.put(COLUMNS[0], "old text".getBytes());
+      region_incommon.commit(batchUpdate);
 
-      lockid = region_incommon.startUpdate(t);
-      region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
-      region_incommon.commit(lockid, right_now);
+      batchUpdate = new BatchUpdate(t, right_now);
+      batchUpdate.put(COLUMNS[0], "new text".getBytes());
+      region_incommon.commit(batchUpdate);
 
       assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
       assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Basic stand-alone testing of HRegion.
@@ -128,12 +129,13 @@ implements RegionUnavailableListener {
     // Write out a bunch of values
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      long writeid = region.startUpdate(new Text("row_" + k));
-      region.put(writeid, CONTENTS_BASIC,
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_" + k), System.currentTimeMillis());
+      batchUpdate.put(CONTENTS_BASIC,
         (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      region.put(writeid, new Text(ANCHORNUM + k),
+      batchUpdate.put(new Text(ANCHORNUM + k),
         (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      region.commit(writeid, System.currentTimeMillis());
+      region.commit(batchUpdate);
     }
     LOG.info("Write " + NUM_VALS + " rows. Elapsed time: "
       + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -178,32 +180,18 @@ implements RegionUnavailableListener {
   }
 
   private void badPuts() {
-
-    // Try put with bad lockid.
-    boolean exceptionThrown = false;
-    try {
-      region.put(-1, CONTENTS_BASIC,
-          "bad input".getBytes(HConstants.UTF8_ENCODING));
-    } catch (Exception e) {
-      exceptionThrown = true;
-    }
-    assertTrue("Bad lock id", exceptionThrown);
-
     // Try column name not registered in the table.
-    exceptionThrown = false;
-    long lockid = -1;
+    boolean exceptionThrown = false;
     try {
-      lockid = region.startUpdate(new Text("Some old key"));
+      BatchUpdate batchUpdate = new BatchUpdate(new Text("Some old key"));
       String unregisteredColName = "FamilyGroup:FamilyLabel";
-      region.put(lockid, new Text(unregisteredColName),
+      batchUpdate.put(new Text(unregisteredColName),
        unregisteredColName.getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid);
+      region.commit(batchUpdate);
     } catch (IOException e) {
       exceptionThrown = true;
-    } finally {
-      if (lockid != -1) {
-        region.abort(lockid);
-      }
     }
     assertTrue("Bad family", exceptionThrown);
     LOG.info("badPuts completed.");
@@ -286,10 +274,12 @@ implements RegionUnavailableListener {
     for(int k = 0; k < vals1.length / 2; k++) {
       String kLabel = String.format("%1$03d", k);
 
-      long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid, System.currentTimeMillis());
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_vals1_" + kLabel),
+          System.currentTimeMillis());
+      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      region.commit(batchUpdate);
       numInserted += 2;
     }
 
@@ -389,10 +379,12 @@ implements RegionUnavailableListener {
     for(int k = vals1.length/2; k < vals1.length; k++) {
       String kLabel = String.format("%1$03d", k);
 
-      long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid, System.currentTimeMillis());
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_vals1_" + kLabel),
+          System.currentTimeMillis());
+      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      region.commit(batchUpdate);
       numInserted += 2;
     }
 
@@ -550,10 +542,11 @@ implements RegionUnavailableListener {
       }
 
       // Write to the HRegion
-      long writeid = region.startUpdate(new Text("row_" + k));
-      region.put(writeid, CONTENTS_BODY,
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_" + k), System.currentTimeMillis());
+      batchUpdate.put(CONTENTS_BODY,
        buf1.toString().getBytes(HConstants.UTF8_ENCODING));
-      region.commit(writeid, System.currentTimeMillis());
+      region.commit(batchUpdate);
       if (k > 0 && k % (N_ROWS / 100) == 0) {
         LOG.info("Flushing write #" + k);

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test of a long-lived scanner validating as we go.
@@ -151,13 +152,13 @@ public class TestScanner extends HBaseTestCase {
 
     // Write information to the meta table
 
-    long lockid = region.startUpdate(ROW_KEY);
+    BatchUpdate batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
 
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
     DataOutputStream s = new DataOutputStream(byteStream);
     HRegionInfo.rootRegionInfo.write(s);
-    region.put(lockid, HConstants.COL_REGIONINFO, byteStream.toByteArray());
-    region.commit(lockid, System.currentTimeMillis());
+    batchUpdate.put(HConstants.COL_REGIONINFO, byteStream.toByteArray());
+    region.commit(batchUpdate);
 
     // What we just committed is in the memcache. Verify that we can get
     // it back both with scanning and get
@@ -180,15 +181,15 @@
 
     HServerAddress address = new HServerAddress("foo.bar.com:1234");
 
-    lockid = region.startUpdate(ROW_KEY);
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
 
-    region.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
      Writables.stringToBytes(address.toString()));
 
-    region.put(lockid, HConstants.COL_STARTCODE,
+    batchUpdate.put(HConstants.COL_STARTCODE,
      Writables.longToBytes(START_CODE));
 
-    region.commit(lockid, System.currentTimeMillis());
+    region.commit(batchUpdate);
 
     // Validate that we can still get the HRegionInfo, even though it is in
     // an older row on disk and there is a newer row in the memcache
|
|||
|
||||
address = new HServerAddress("bar.foo.com:4321");
|
||||
|
||||
lockid = region.startUpdate(ROW_KEY);
|
||||
batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
|
||||
|
||||
region.put(lockid, HConstants.COL_SERVER,
|
||||
batchUpdate.put(HConstants.COL_SERVER,
|
||||
Writables.stringToBytes(address.toString()));
|
||||
|
||||
region.commit(lockid, System.currentTimeMillis());
|
||||
region.commit(batchUpdate);
|
||||
|
||||
// Validate again
|
||||
|
||||
|
|