HBASE-544 Purge startUpdate from all internal code and test cases
Removes startUpdate calls from all but a few places. TestBatchUpdate and TestMultipleUpdates both stay the same, but TMU will be removed when startUpdate is. Parts of TBU will also be whacked when we remove the deprecated methods. HTable still has its startUpdate methods. Changed the Incommon interface to remove the startUpdate, put, delete, and commit methods, and made a new commit(BatchUpdate).

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@644811 13f79535-47bb-0310-9956-ffa450edef68
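The shape of the change is the same at every call site in this patch: the deprecated lock-id sequence (startUpdate, then per-cell put/delete against the lock id, then commit(lockid) or commit(lockid, ts)) collapses into building a BatchUpdate and handing it to a single commit call. A minimal before/after sketch, assuming that-era HBase classes on the classpath; the row key and "info:" column names are placeholders, not from the patch, and the old style is left in a comment since this patch deprecates it:

import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.io.Text;

public class BatchUpdateMigrationSketch {
  // Old, deprecated style: per-cell operations keyed by a row lock id.
  //   long lockid = table.startUpdate(row);
  //   table.put(lockid, new Text("info:name"), "value".getBytes());
  //   table.delete(lockid, new Text("info:stale"));
  //   table.commit(lockid);  // or table.commit(lockid, timestamp)

  // New style: stage puts/deletes in a BatchUpdate, commit in one call.
  static void updateRow(HTable table, Text row) throws IOException {
    BatchUpdate batchUpdate = new BatchUpdate(row); // or new BatchUpdate(row, timestamp)
    batchUpdate.put(new Text("info:name"), "value".getBytes());
    batchUpdate.delete(new Text("info:stale"));
    table.commit(batchUpdate);
  }
}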
parent 589a407f0c
commit 0c3d5abbc8
@@ -17,6 +17,7 @@ Hbase Change Log
 
   IMPROVEMENTS
    HBASE-469 Streamline HStore startup and compactions
+   HBASE-544 Purge startUpdate from internal code and test cases
 
 Release 0.1.0
 
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Deletes values from tables.
@@ -55,11 +56,11 @@ public class DeleteCommand extends BasicCommand {
     HTable hTable = new HTable(conf, tableName);
 
     if (rowKey != null) {
-      long lockID = hTable.startUpdate(rowKey);
+      BatchUpdate bu = new BatchUpdate(rowKey);
       for (Text column : getColumnList(admin, hTable)) {
-        hTable.delete(lockID, new Text(column));
+        bu.delete(new Text(column));
       }
-      hTable.commit(lockID);
+      hTable.commit(bu);
     } else {
       admin.disableTable(tableName);
       for (Text column : getColumnList(admin, hTable)) {
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Inserts values into tables.
@@ -59,7 +60,9 @@ public class InsertCommand extends BasicCommand {
 
     try {
       HTable table = new HTable(conf, tableName);
-      long lockId = table.startUpdate(getRow());
+      BatchUpdate batchUpdate = timestamp == null ?
+        new BatchUpdate(getRow())
+        : new BatchUpdate(getRow(), Long.parseLong(timestamp));
 
       for (int i = 0; i < values.size(); i++) {
         Text column = null;
@@ -67,13 +70,10 @@ public class InsertCommand extends BasicCommand {
           column = getColumn(i);
         else
           column = new Text(getColumn(i) + ":");
-        table.put(lockId, column, getValue(i));
+        batchUpdate.put(column, getValue(i));
       }
 
-      if(timestamp != null)
-        table.commit(lockId, Long.parseLong(timestamp));
-      else
-        table.commit(lockId);
+      table.commit(batchUpdate);
 
       return new ReturnMsg(1, "1 row inserted successfully.");
     } catch (IOException e) {
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 import org.mortbay.servlet.MultiPartResponse;
 import org.w3c.dom.Document;
@@ -296,12 +297,13 @@ public class TableHandler extends GenericHandler {
       throw new ServletException(e);
     }
 
-    long lock_id = -1;
+    BatchUpdate batchUpdate;
 
     try{
       // start an update
       Text key = new Text(row);
-      lock_id = table.startUpdate(key);
+      batchUpdate = timestamp == null ?
+        new BatchUpdate(key) : new BatchUpdate(key, Long.parseLong(timestamp));
 
       // set the columns from the xml
       NodeList columns = doc.getElementsByTagName("column");
@@ -328,24 +330,16 @@ public class TableHandler extends GenericHandler {
         }
 
         // put the value
-        table.put(lock_id, name, value);
+        batchUpdate.put(name, value);
       }
 
       // commit the update
-      if (timestamp != null) {
-        table.commit(lock_id, Long.parseLong(timestamp));
-      }
-      else{
-        table.commit(lock_id);
-      }
+      table.commit(batchUpdate);
 
       // respond with a 200
       response.setStatus(200);
     }
     catch(Exception e){
-      if (lock_id != -1) {
-        table.abort(lock_id);
-      }
       throw new ServletException(e);
     }
   }
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 import com.facebook.thrift.TException;
 import com.facebook.thrift.protocol.TBinaryProtocol;
@@ -296,9 +297,9 @@ public class ThriftServer {
     }
     try {
       HTable table = getTable(tableName);
-      long lockid = table.startUpdate(getText(row));
-      table.put(lockid, getText(column), value);
-      table.commit(lockid);
+      BatchUpdate batchUpdate = new BatchUpdate(getText(row));
+      batchUpdate.put(getText(column), value);
+      table.commit(batchUpdate);
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     } catch (IllegalArgumentException e) {
@@ -412,15 +413,15 @@ public class ThriftServer {
 
     try {
       table = getTable(tableName);
-      lockid = table.startUpdate(getText(row));
+      BatchUpdate batchUpdate = new BatchUpdate(getText(row), timestamp);
       for (Mutation m : mutations) {
         if (m.isDelete) {
-          table.delete(lockid, getText(m.column));
+          batchUpdate.delete(getText(m.column));
         } else {
-          table.put(lockid, getText(m.column), m.value);
+          batchUpdate.put(getText(m.column), m.value);
         }
       }
-      table.commit(lockid, timestamp);
+      table.commit(batchUpdate);
     } catch (IOException e) {
       if (lockid != null) {
         table.abort(lockid);
@@ -25,6 +25,7 @@ import java.util.Random;
 
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -126,11 +127,11 @@ public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
 
     HRegionIncommon r = new HRegionIncommon(region);
     for(int i = firstRow; i < firstRow + nrows; i++) {
-      long lockid = r.startUpdate(new Text("row_"
+      BatchUpdate batchUpdate = new BatchUpdate(new Text("row_"
           + String.format("%1$05d", i)));
 
-      r.put(lockid, COLUMN_NAME, value.get());
-      r.commit(lockid, System.currentTimeMillis());
+      batchUpdate.put(COLUMN_NAME, value.get());
+      region.batchUpdate(batchUpdate);
       if(i % 10000 == 0) {
         System.out.println("Flushing write #" + i);
         r.flushcache();
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Additional scanner tests.
@@ -106,9 +107,9 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
       // flipping the switch in StopRowFilter stopping us returning all
       // of the rest of the other store content.
       if (i == 0) {
-        long id = inc.startUpdate(new Text("bbb"));
-        inc.put(id, families[0], "bbb".getBytes());
-        inc.commit(id);
+        BatchUpdate batchUpdate = new BatchUpdate(new Text("bbb"));
+        batchUpdate.put(families[0], "bbb".getBytes());
+        inc.commit(batchUpdate);
       }
     }
     RowFilterInterface f =
@@ -173,12 +174,12 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
     HTable table = new HTable(this.conf, tableName);
     // Add a row to columns without qualifiers and then two with. Make one
     // numbers only so easy to find w/ a regex.
-    long id = table.startUpdate(new Text(getName()));
+    BatchUpdate batchUpdate = new BatchUpdate(new Text(getName()));
     final String firstColkeyFamily = Character.toString(FIRST_COLKEY) + ":";
-    table.put(id, new Text(firstColkeyFamily + getName()), GOOD_BYTES);
-    table.put(id, new Text(firstColkeyFamily + "22222"), GOOD_BYTES);
-    table.put(id, new Text(firstColkeyFamily), GOOD_BYTES);
-    table.commit(id);
+    batchUpdate.put(new Text(firstColkeyFamily + getName()), GOOD_BYTES);
+    batchUpdate.put(new Text(firstColkeyFamily + "22222"), GOOD_BYTES);
+    batchUpdate.put(new Text(firstColkeyFamily), GOOD_BYTES);
+    table.commit(batchUpdate);
     // Now do a scan using a regex for a column name.
     checkRegexingScanner(table, firstColkeyFamily + "\\d+");
     // Do a new scan that only matches on column family.
@@ -230,12 +231,12 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
     HTable table = new HTable(conf, tableName);
     for (char i = FIRST_ROWKEY; i <= LAST_ROWKEY; i++) {
       Text rowKey = new Text(new String(new char[] { i }));
-      long lockID = table.startUpdate(rowKey);
+      BatchUpdate batchUpdate = new BatchUpdate(rowKey);
       for (char j = 0; j < colKeys.length; j++) {
-        table.put(lockID, colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
+        batchUpdate.put(colKeys[j], (i >= FIRST_BAD_RANGE_ROWKEY &&
             i <= LAST_BAD_RANGE_ROWKEY)? BAD_BYTES : GOOD_BYTES);
       }
-      table.commit(lockID);
+      table.commit(batchUpdate);
     }
 
     regExpFilterTest(table, colKeys);
@@ -412,13 +413,13 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
       final HServerAddress serverAddress,
       final long startCode)
   throws IOException {
-    long lockid = t.startUpdate(region.getRegionName());
-    t.put(lockid, HConstants.COL_REGIONINFO,
+    BatchUpdate batchUpdate = new BatchUpdate(region.getRegionName());
+    batchUpdate.put(HConstants.COL_REGIONINFO,
       Writables.getBytes(region.getRegionInfo()));
-    t.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(serverAddress.toString()));
-    t.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
-    t.commit(lockid);
+    batchUpdate.put(HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
+    t.commit(batchUpdate);
     // Assert added.
     byte [] bytes =
       t.get(region.getRegionName(), HConstants.COL_REGIONINFO).getValue();
@@ -439,11 +440,11 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
    */
   private void removeRegionFromMETA(final HTable t, final Text regionName)
   throws IOException {
-    long lockid = t.startUpdate(regionName);
-    t.delete(lockid, HConstants.COL_REGIONINFO);
-    t.delete(lockid, HConstants.COL_SERVER);
-    t.delete(lockid, HConstants.COL_STARTCODE);
-    t.commit(lockid);
+    BatchUpdate batchUpdate = new BatchUpdate(regionName);
+    batchUpdate.delete(HConstants.COL_REGIONINFO);
+    batchUpdate.delete(HConstants.COL_SERVER);
+    batchUpdate.delete(HConstants.COL_STARTCODE);
+    t.commit(batchUpdate);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removed " + regionName + " from table " + t.getTableName());
     }
@@ -270,32 +270,21 @@ public abstract class HBaseTestCase extends TestCase {
             break EXIT;
           }
           try {
-            long lockid = updater.startUpdate(t);
+            BatchUpdate batchUpdate = ts == -1 ?
+              new BatchUpdate(t) : new BatchUpdate(t, ts);
             try {
-              updater.put(lockid, new Text(column), bytes);
-              if (ts == -1) {
-                updater.commit(lockid);
-              } else {
-                updater.commit(lockid, ts);
-              }
-              lockid = -1;
+              batchUpdate.put(new Text(column), bytes);
+              updater.commit(batchUpdate);
             } catch (RuntimeException ex) {
               ex.printStackTrace();
               throw ex;
 
             } catch (IOException ex) {
               ex.printStackTrace();
               throw ex;
-            } finally {
-              if (lockid != -1) {
-                updater.abort(lockid);
-              }
-            }
+            }
           } catch (RuntimeException ex) {
             ex.printStackTrace();
             throw ex;
 
          } catch (IOException ex) {
             ex.printStackTrace();
             throw ex;
@@ -325,12 +314,6 @@ public abstract class HBaseTestCase extends TestCase {
    * TOOD: Come up w/ a better name for this interface.
    */
   public static interface Incommon {
-    /**
-     * @param row
-     * @return update id
-     * @throws IOException
-     */
-    public long startUpdate(Text row) throws IOException;
     /**
      * @param row
      * @param column
@@ -356,19 +339,6 @@ public abstract class HBaseTestCase extends TestCase {
      */
     public Cell[] get(Text row, Text column, long ts, int versions)
     throws IOException;
-    /**
-     * @param lockid
-     * @param column
-     * @param val
-     * @throws IOException
-     */
-    public void put(long lockid, Text column, byte val[]) throws IOException;
-    /**
-     * @param lockid
-     * @param column
-     * @throws IOException
-     */
-    public void delete(long lockid, Text column) throws IOException;
     /**
      * @param row
      * @param column
@@ -376,22 +346,13 @@ public abstract class HBaseTestCase extends TestCase {
      * @throws IOException
      */
     public void deleteAll(Text row, Text column, long ts) throws IOException;
 
     /**
-     * @param lockid
+     * @param batchUpdate
      * @throws IOException
      */
-    public void commit(long lockid) throws IOException;
-    /**
-     * @param lockid
-     * @param ts
-     * @throws IOException
-     */
-    public void commit(long lockid, long ts) throws IOException;
-    /**
-     * @param lockid
-     * @throws IOException
-     */
-    public void abort(long lockid) throws IOException;
+    public void commit(BatchUpdate batchUpdate) throws IOException;
     /**
      * @param columns
      * @param firstRow
@@ -423,70 +384,39 @@ public abstract class HBaseTestCase extends TestCase {
       this.region = HRegion;
       this.batch = null;
     }
 
     /** {@inheritDoc} */
-    public void abort(@SuppressWarnings("unused") long lockid) {
-      this.batch = null;
-    }
-    /** {@inheritDoc} */
-    public void commit(long lockid) throws IOException {
-      commit(lockid, HConstants.LATEST_TIMESTAMP);
-    }
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid, final long ts)
-    throws IOException {
-      checkBatch();
-      try {
-        this.batch.setTimestamp(ts);
-        this.region.batchUpdate(batch);
-      } finally {
-        this.batch = null;
-      }
-    }
-    /** {@inheritDoc} */
-    public void put(@SuppressWarnings("unused") long lockid, Text column,
-        byte[] val) {
-      checkBatch();
-      this.batch.put(column, val);
-    }
-    /** {@inheritDoc} */
-    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
-      checkBatch();
-      this.batch.delete(column);
-    }
+    public void commit(BatchUpdate batchUpdate) throws IOException {
+      region.batchUpdate(batchUpdate);
+    };
     /** {@inheritDoc} */
     public void deleteAll(Text row, Text column, long ts) throws IOException {
       this.region.deleteAll(row, column, ts);
     }
 
-    /**
-     * @param row
-     * @return update id
-     */
-    public long startUpdate(Text row) {
-      if (this.batch != null) {
-        throw new IllegalStateException("Update already in progress");
-      }
-      this.batch = new BatchUpdate(row);
-      return 1;
-    }
     /** {@inheritDoc} */
     public HScannerInterface getScanner(Text [] columns, Text firstRow,
         long ts) throws IOException {
       return this.region.getScanner(columns, firstRow, ts, null);
     }
 
     /** {@inheritDoc} */
     public Cell get(Text row, Text column) throws IOException {
       return this.region.get(row, column);
     }
 
     /** {@inheritDoc} */
     public Cell[] get(Text row, Text column, int versions) throws IOException {
       return this.region.get(row, column, versions);
     }
 
     /** {@inheritDoc} */
     public Cell[] get(Text row, Text column, long ts, int versions)
     throws IOException {
       return this.region.get(row, column, ts, versions);
     }
 
     /**
      * @param row
      * @return values for each column in the specified row
@@ -495,6 +425,7 @@ public abstract class HBaseTestCase extends TestCase {
     public Map<Text, Cell> getFull(Text row) throws IOException {
       return region.getFull(row, null, HConstants.LATEST_TIMESTAMP);
     }
 
     /** {@inheritDoc} */
     public void flushcache() throws IOException {
       this.region.flushcache();
@@ -522,56 +453,17 @@ public abstract class HBaseTestCase extends TestCase {
       this.table = table;
       this.batch = null;
     }
 
     /** {@inheritDoc} */
-    public void abort(@SuppressWarnings("unused") long lockid) {
-      if (this.batch != null) {
-        this.batch = null;
-      }
-    }
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid)
-    throws IOException {
-      checkBatch();
-      this.table.commit(batch);
-      this.batch = null;
-    }
-
-    /** {@inheritDoc} */
-    public void commit(@SuppressWarnings("unused") long lockid, final long ts)
-    throws IOException {
-      checkBatch();
-      this.batch.setTimestamp(ts);
-      this.table.commit(batch);
-      this.batch = null;
-    }
-
-    /** {@inheritDoc} */
-    public void put(@SuppressWarnings("unused") long lockid, Text column,
-        byte[] val) {
-      checkBatch();
-      this.batch.put(column, val);
-    }
-
-    /** {@inheritDoc} */
-    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
-      checkBatch();
-      this.batch.delete(column);
-    }
-
+    public void commit(BatchUpdate batchUpdate) throws IOException {
+      table.commit(batchUpdate);
+    };
     /** {@inheritDoc} */
     public void deleteAll(Text row, Text column, long ts) throws IOException {
       this.table.deleteAll(row, column, ts);
     }
 
-    /** {@inheritDoc} */
-    public long startUpdate(Text row) {
-      if (this.batch != null) {
-        throw new IllegalStateException("Batch update already in progress.");
-      }
-      this.batch = new BatchUpdate(row);
-      return 0L;
-    }
-
     /** {@inheritDoc} */
     public HScannerInterface getScanner(Text [] columns, Text firstRow,
         long ts) throws IOException {
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.HBaseTestCase;
 
 import org.apache.commons.logging.Log;
@@ -228,13 +229,10 @@ public class TimestampTestBase extends HBaseTestCase {
   public static void put(final Incommon loader, final byte [] bytes,
     final long ts)
   throws IOException {
-    long lockid = loader.startUpdate(ROW);
-    loader.put(lockid, COLUMN, bytes);
-    if (ts == HConstants.LATEST_TIMESTAMP) {
-      loader.commit(lockid);
-    } else {
-      loader.commit(lockid, ts);
-    }
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ?
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.put(COLUMN, bytes);
+    loader.commit(batchUpdate);
   }
 
   public static void delete(final Incommon loader) throws IOException {
@@ -242,12 +240,9 @@ public class TimestampTestBase extends HBaseTestCase {
   }
 
   public static void delete(final Incommon loader, final long ts) throws IOException {
-    long lockid = loader.startUpdate(ROW);
-    loader.delete(lockid, COLUMN);
-    if (ts == HConstants.LATEST_TIMESTAMP) {
-      loader.commit(lockid);
-    } else {
-      loader.commit(lockid, ts);
-    }
+    BatchUpdate batchUpdate = ts == HConstants.LATEST_TIMESTAMP ?
+      new BatchUpdate(ROW) : new BatchUpdate(ROW, ts);
+    batchUpdate.delete(COLUMN);
+    loader.commit(batchUpdate);
   }
 }
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Tests HTable
@@ -87,9 +88,9 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
     HTableDescriptor meta = a.getMetadata();
     assertTrue(meta.equals(tableAdesc));
 
-    long lockid = a.startUpdate(row);
-    a.put(lockid, COLUMN_FAMILY, value);
-    a.commit(lockid);
+    BatchUpdate batchUpdate = new BatchUpdate(row);
+    batchUpdate.put(COLUMN_FAMILY, value);
+    a.commit(batchUpdate);
 
     // open a new connection to A and a connection to b
 
@@ -105,12 +106,11 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
       HStoreKey key = new HStoreKey();
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
       while(s.next(key, results)) {
-        lockid = b.startUpdate(key.getRow());
+        batchUpdate = new BatchUpdate(key.getRow());
         for(Map.Entry<Text, byte[]> e: results.entrySet()) {
-          b.put(lockid, e.getKey(), e.getValue());
+          batchUpdate.put(e.getKey(), e.getValue());
         }
-        b.commit(lockid);
-        b.abort(lockid);
+        b.commit(batchUpdate);
       }
     } finally {
       s.close();
@@ -25,8 +25,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 
 /**
- * Tests that HClient protects against multiple updates
+ * Tests that HClient protects against multiple updates. This test case will
+ * be removed at the same time that HTable.startuUpdate and friends are
+ * removed.
  */
+@Deprecated
 public class TestMultipleUpdates extends HBaseClusterTestCase {
   private static final String CONTENTS_STR = "contents:";
   private static final Text CONTENTS = new Text(CONTENTS_STR);
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test the functionality of deleteAll.
@@ -94,27 +95,27 @@ public class TestDeleteAll extends HBaseTestCase {
     Text colB = new Text(COLUMNS[0].toString() + "b");
     Text colC = new Text(COLUMNS[0].toString() + "c");
     Text colD = new Text(COLUMNS[0].toString());
 
-    long lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(0, flush).getBytes());
-    region_incommon.commit(lock, t0);
-
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(1, flush).getBytes());
-    region_incommon.commit(lock, t1);
+    BatchUpdate batchUpdate = new BatchUpdate(row, t0);
+    batchUpdate.put(colA, cellData(0, flush).getBytes());
+    batchUpdate.put(colB, cellData(0, flush).getBytes());
+    batchUpdate.put(colC, cellData(0, flush).getBytes());
+    batchUpdate.put(colD, cellData(0, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colD, cellData(2, flush).getBytes());
-    region_incommon.commit(lock, t2);
+    batchUpdate = new BatchUpdate(row, t1);
+    batchUpdate.put(colA, cellData(1, flush).getBytes());
+    batchUpdate.put(colB, cellData(1, flush).getBytes());
+    batchUpdate.put(colC, cellData(1, flush).getBytes());
+    batchUpdate.put(colD, cellData(1, flush).getBytes());
+    region_incommon.commit(batchUpdate);
+
+    batchUpdate = new BatchUpdate(row, t2);
+    batchUpdate.put(colA, cellData(2, flush).getBytes());
+    batchUpdate.put(colB, cellData(2, flush).getBytes());
+    batchUpdate.put(colC, cellData(2, flush).getBytes());
+    batchUpdate.put(colD, cellData(2, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
     if (flush) {region_incommon.flushcache();}
 
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test the functionality of deleteFamily.
@@ -90,24 +91,25 @@ public class TestDeleteFamily extends HBaseTestCase {
     Text colA = new Text(COLUMNS[0].toString() + "a");
     Text colB = new Text(COLUMNS[0].toString() + "b");
     Text colC = new Text(COLUMNS[1].toString() + "c");
 
-    long lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(0, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(0, flush).getBytes());
-    region_incommon.commit(lock, t0);
-
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(1, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(1, flush).getBytes());
-    region_incommon.commit(lock, t1);
+    BatchUpdate batchUpdate = null;
+    batchUpdate = new BatchUpdate(row, t0);
+    batchUpdate.put(colA, cellData(0, flush).getBytes());
+    batchUpdate.put(colB, cellData(0, flush).getBytes());
+    batchUpdate.put(colC, cellData(0, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lock = region_incommon.startUpdate(row);
-    region_incommon.put(lock, colA, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colB, cellData(2, flush).getBytes());
-    region_incommon.put(lock, colC, cellData(2, flush).getBytes());
-    region_incommon.commit(lock, t2);
+    batchUpdate = new BatchUpdate(row, t1);
+    batchUpdate.put(colA, cellData(1, flush).getBytes());
+    batchUpdate.put(colB, cellData(1, flush).getBytes());
+    batchUpdate.put(colC, cellData(1, flush).getBytes());
+    region_incommon.commit(batchUpdate);
+
+    batchUpdate = new BatchUpdate(row, t2);
+    batchUpdate.put(colA, cellData(2, flush).getBytes());
+    batchUpdate.put(colB, cellData(2, flush).getBytes());
+    batchUpdate.put(colC, cellData(2, flush).getBytes());
+    region_incommon.commit(batchUpdate);
 
     if (flush) {region_incommon.flushcache();}
 
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /** Test case for get */
 public class TestGet extends HBaseTestCase {
@@ -99,20 +100,21 @@ public class TestGet extends HBaseTestCase {
     HRegionIncommon r = new HRegionIncommon(region);
 
     // Write information to the table
 
-    long lockid = r.startUpdate(ROW_KEY);
-    r.put(lockid, CONTENTS, Writables.getBytes(CONTENTS));
-    r.put(lockid, HConstants.COL_REGIONINFO,
+    BatchUpdate batchUpdate = null;
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+    batchUpdate.put(CONTENTS, Writables.getBytes(CONTENTS));
+    batchUpdate.put(HConstants.COL_REGIONINFO,
       Writables.getBytes(HRegionInfo.rootRegionInfo));
-    r.commit(lockid, System.currentTimeMillis());
+    r.commit(batchUpdate);
 
-    lockid = r.startUpdate(ROW_KEY);
-    r.put(lockid, HConstants.COL_SERVER,
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(new HServerAddress(SERVER_ADDRESS).toString()));
-    r.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(lockid));
-    r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+    batchUpdate.put(HConstants.COL_STARTCODE, Writables.longToBytes(12345));
+    batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "region"),
       "region".getBytes(HConstants.UTF8_ENCODING));
-    r.commit(lockid, System.currentTimeMillis());
+    r.commit(batchUpdate);
 
     // Verify that get works the same from memcache as when reading from disk
     // NOTE dumpRegion won't work here because it only reads from disk.
@@ -131,15 +133,15 @@ public class TestGet extends HBaseTestCase {
 
     // Update one family member and add a new one
 
-    lockid = r.startUpdate(ROW_KEY);
-    r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
+    batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "region"),
       "region2".getBytes(HConstants.UTF8_ENCODING));
     String otherServerName = "bar.foo.com:4321";
-    r.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(new HServerAddress(otherServerName).toString()));
-    r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
+    batchUpdate.put(new Text(HConstants.COLUMN_FAMILY + "junk"),
       "junk".getBytes(HConstants.UTF8_ENCODING));
-    r.commit(lockid, System.currentTimeMillis());
+    r.commit(batchUpdate);
 
     verifyGet(r, otherServerName);
 
@@ -122,13 +122,13 @@ public class TestGet2 extends HBaseTestCase implements HConstants {
     long one_second_ago = right_now - 1000;
 
     Text t = new Text("test_row");
-    long lockid = region_incommon.startUpdate(t);
-    region_incommon.put(lockid, COLUMNS[0], "old text".getBytes());
-    region_incommon.commit(lockid, one_second_ago);
+    BatchUpdate batchUpdate = new BatchUpdate(t, one_second_ago);
+    batchUpdate.put(COLUMNS[0], "old text".getBytes());
+    region_incommon.commit(batchUpdate);
 
-    lockid = region_incommon.startUpdate(t);
-    region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
-    region_incommon.commit(lockid, right_now);
+    batchUpdate = new BatchUpdate(t, right_now);
+    batchUpdate.put(COLUMNS[0], "new text".getBytes());
+    region_incommon.commit(batchUpdate);
 
     assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
     assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Basic stand-alone testing of HRegion.
@@ -128,12 +129,13 @@ implements RegionUnavailableListener {
     // Write out a bunch of values
 
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      long writeid = region.startUpdate(new Text("row_" + k));
-      region.put(writeid, CONTENTS_BASIC,
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_" + k), System.currentTimeMillis());
+      batchUpdate.put(CONTENTS_BASIC,
         (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      region.put(writeid, new Text(ANCHORNUM + k),
+      batchUpdate.put(new Text(ANCHORNUM + k),
         (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      region.commit(writeid, System.currentTimeMillis());
+      region.commit(batchUpdate);
     }
     LOG.info("Write " + NUM_VALS + " rows. Elapsed time: "
       + ((System.currentTimeMillis() - startTime) / 1000.0));
@@ -178,32 +180,18 @@ implements RegionUnavailableListener {
   }
 
   private void badPuts() {
-    // Try put with bad lockid.
+    // Try column name not registered in the table.
     boolean exceptionThrown = false;
-    try {
-      region.put(-1, CONTENTS_BASIC,
-        "bad input".getBytes(HConstants.UTF8_ENCODING));
-    } catch (Exception e) {
-      exceptionThrown = true;
-    }
-    assertTrue("Bad lock id", exceptionThrown);
-
-    // Try column name not registered in the table.
     exceptionThrown = false;
-    long lockid = -1;
     try {
-      lockid = region.startUpdate(new Text("Some old key"));
+      BatchUpdate batchUpdate = new BatchUpdate(new Text("Some old key"));
       String unregisteredColName = "FamilyGroup:FamilyLabel";
-      region.put(lockid, new Text(unregisteredColName),
+      batchUpdate.put(new Text(unregisteredColName),
        unregisteredColName.getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid);
+      region.commit(batchUpdate);
     } catch (IOException e) {
       exceptionThrown = true;
     } finally {
-      if (lockid != -1) {
-        region.abort(lockid);
-      }
     }
     assertTrue("Bad family", exceptionThrown);
     LOG.info("badPuts completed.");
@@ -286,10 +274,12 @@ implements RegionUnavailableListener {
     for(int k = 0; k < vals1.length / 2; k++) {
       String kLabel = String.format("%1$03d", k);
 
-      long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid, System.currentTimeMillis());
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_vals1_" + kLabel),
+          System.currentTimeMillis());
+      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      region.commit(batchUpdate);
       numInserted += 2;
     }
 
@@ -389,10 +379,12 @@ implements RegionUnavailableListener {
     for(int k = vals1.length/2; k < vals1.length; k++) {
       String kLabel = String.format("%1$03d", k);
 
-      long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
-      region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
-      region.commit(lockid, System.currentTimeMillis());
+      BatchUpdate batchUpdate =
+        new BatchUpdate(new Text("row_vals1_" + kLabel),
+          System.currentTimeMillis());
+      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+      region.commit(batchUpdate);
       numInserted += 2;
     }
 
@@ -550,10 +542,11 @@ implements RegionUnavailableListener {
     }
 
     // Write to the HRegion
-    long writeid = region.startUpdate(new Text("row_" + k));
-    region.put(writeid, CONTENTS_BODY,
+    BatchUpdate batchUpdate =
+      new BatchUpdate(new Text("row_" + k), System.currentTimeMillis());
+    batchUpdate.put(CONTENTS_BODY,
       buf1.toString().getBytes(HConstants.UTF8_ENCODING));
-    region.commit(writeid, System.currentTimeMillis());
+    region.commit(batchUpdate);
     if (k > 0 && k % (N_ROWS / 100) == 0) {
       LOG.info("Flushing write #" + k);
 
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 
 /**
  * Test of a long-lived scanner validating as we go.
@@ -150,14 +151,14 @@ public class TestScanner extends HBaseTestCase {
     region = new HRegionIncommon(r);
 
     // Write information to the meta table
 
-    long lockid = region.startUpdate(ROW_KEY);
+    BatchUpdate batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
 
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
     DataOutputStream s = new DataOutputStream(byteStream);
     HRegionInfo.rootRegionInfo.write(s);
-    region.put(lockid, HConstants.COL_REGIONINFO, byteStream.toByteArray());
-    region.commit(lockid, System.currentTimeMillis());
+    batchUpdate.put(HConstants.COL_REGIONINFO, byteStream.toByteArray());
+    region.commit(batchUpdate);
 
     // What we just committed is in the memcache. Verify that we can get
     // it back both with scanning and get
@@ -180,15 +181,15 @@ public class TestScanner extends HBaseTestCase {
 
     HServerAddress address = new HServerAddress("foo.bar.com:1234");
 
-    lockid = region.startUpdate(ROW_KEY);
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
 
-    region.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(address.toString()));
 
-    region.put(lockid, HConstants.COL_STARTCODE,
+    batchUpdate.put(HConstants.COL_STARTCODE,
       Writables.longToBytes(START_CODE));
 
-    region.commit(lockid, System.currentTimeMillis());
+    region.commit(batchUpdate);
 
     // Validate that we can still get the HRegionInfo, even though it is in
     // an older row on disk and there is a newer row in the memcache
@@ -220,12 +221,12 @@ public class TestScanner extends HBaseTestCase {
 
     address = new HServerAddress("bar.foo.com:4321");
 
-    lockid = region.startUpdate(ROW_KEY);
+    batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
 
-    region.put(lockid, HConstants.COL_SERVER,
+    batchUpdate.put(HConstants.COL_SERVER,
       Writables.stringToBytes(address.toString()));
 
-    region.commit(lockid, System.currentTimeMillis());
+    region.commit(batchUpdate);
 
     // Validate again
 