HBASE-950 HTable.commit no longer works with existing RowLocks though it's still in API

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@707225 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2008-10-22 23:48:12 +00:00
parent 4357226cd4
commit d0ce40d1ec
5 changed files with 35 additions and 7 deletions

CHANGES.txt

@@ -38,6 +38,7 @@ Release 0.19.0 - Unreleased
    HBASE-939 NPE in HStoreKey
    HBASE-945 Be consistent in use of qualified/unqualified mapfile paths
    HBASE-946 Row with 55k deletes timesout scanner lease
+   HBASE-950 HTable.commit no longer works with existing RowLocks though it's still in API
 
   IMPROVEMENTS
    HBASE-901 Add a limit to key length, check key and value length on client side

HTable.java

@@ -994,6 +994,9 @@ public class HTable {
       final RowLock rl)
   throws IOException {
     checkRowAndColumns(batchUpdate);
+    if(rl != null) {
+      batchUpdate.setRowLock(rl.getLockId());
+    }
     writeBuffer.add(batchUpdate);
     currentWriteBufferSize += batchUpdate.getSize();
     if(autoFlush || currentWriteBufferSize > writeBufferSize) {
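
For context, a hedged client-side sketch of the path this hunk fixes. commit(BatchUpdate, RowLock) and the write buffer are visible in the hunk; lockRow, unlockRow, setAutoFlush, flushCommits, and the table and column names are assumptions about the 0.19-era API, not shown in this diff.

    // Sketch only; lockRow/unlockRow/flushCommits are assumed API, not
    // confirmed by this diff.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");
    table.setAutoFlush(false);
    RowLock lock = table.lockRow(Bytes.toBytes("row1"));
    try {
      BatchUpdate update = new BatchUpdate(Bytes.toBytes("row1"));
      update.put(Bytes.toBytes("colfam:qual"), Bytes.toBytes("value"));
      // With this fix, commit() copies lock.getLockId() into the BatchUpdate,
      // so the lock is still honored when the write buffer flushes later.
      table.commit(update, lock);
      table.flushCommits();
    } finally {
      table.unlockRow(lock);
    }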

BatchUpdate.java

@@ -27,6 +27,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.RowLock;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
@@ -50,6 +51,8 @@ public class BatchUpdate implements WritableComparable<BatchUpdate>,
   private long timestamp = HConstants.LATEST_TIMESTAMP;
+  private long rowLock = -1l;
+
   /**
    * Default constructor used serializing. Do not use directly.
    */
@@ -99,6 +102,22 @@ public class BatchUpdate implements WritableComparable<BatchUpdate>,
     this.operations = new ArrayList<BatchOperation>();
     this.size = (row == null)? 0: row.length;
   }
+
+  /**
+   * Get the row lock associated with this update
+   * @return the row lock
+   */
+  public long getRowLock() {
+    return rowLock;
+  }
+
+  /**
+   * Set the lock to be used for this update
+   * @param rowLock the row lock
+   */
+  public void setRowLock(long rowLock) {
+    this.rowLock = rowLock;
+  }
 
   /** @return the row */
   public byte [] getRow() {
@@ -283,6 +302,7 @@ public class BatchUpdate implements WritableComparable<BatchUpdate>,
       op.readFields(in);
       this.operations.add(op);
     }
+    this.rowLock = in.readLong();
   }
 
   public void write(final DataOutput out) throws IOException {
@@ -293,6 +313,7 @@ public class BatchUpdate implements WritableComparable<BatchUpdate>,
     for (BatchOperation op: operations) {
       op.write(out);
     }
+    out.writeLong(this.rowLock);
   }
 
   public int compareTo(BatchUpdate o) {
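
A minimal round-trip sketch of the serialization change above, assuming only what the hunks show (a row-taking constructor, the no-arg constructor, and the Writable methods); put(), the org.apache.hadoop.hbase.io package path, and the class and row names are assumptions:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowLockRoundTrip {
      public static void main(String[] args) throws IOException {
        BatchUpdate sent = new BatchUpdate(Bytes.toBytes("row1"));
        sent.put(Bytes.toBytes("colfam:qual"), Bytes.toBytes("value"));
        sent.setRowLock(42L);

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        sent.write(new DataOutputStream(buf)); // rowLock is now written last

        // The no-arg constructor is documented as "used serializing", which
        // is exactly the use here.
        BatchUpdate received = new BatchUpdate();
        received.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(received.getRowLock()); // 42
      }
    }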

HRegion.java

@@ -1358,11 +1358,12 @@ public class HRegion implements HConstants {
    * blocked while updating.
    * @param bus
    */
-  public void batchUpdate(BatchUpdate[] bus) throws IOException {
+  public void batchUpdate(BatchUpdate[] bus, Integer[] locks)
+  throws IOException {
     splitsAndClosesLock.readLock().lock();
     try {
-      for (BatchUpdate bu : bus) {
-        batchUpdate(bu, null);
+      for (int i = 0; i < bus.length; i++) {
+        batchUpdate(bus[i], locks[i]);
       }
     } finally {
       splitsAndClosesLock.readLock().unlock();
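
HRegion.batchUpdate now takes a locks array parallel to bus, where a null entry means the region acquires that row's lock itself (the old behavior of passing null explicitly). A hypothetical helper illustrating the contract; the helper class and the org.apache.hadoop.hbase.regionserver package path are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    // Hypothetical helper, not part of this commit.
    final class UnlockedBatch {
      static void batchUpdate(HRegion region, BatchUpdate[] bus)
          throws IOException {
        // new Integer[n] starts all-null; each null entry makes
        // batchUpdate(bus[i], locks[i]) acquire the row lock internally,
        // matching the old loop's explicit null argument.
        Integer[] locks = new Integer[bus.length];
        region.batchUpdate(bus, locks);
      }
    }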

HRegionServer.java

@@ -1147,7 +1147,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       validateValuesLength(b, region);
       try {
         cacheFlusher.reclaimMemcacheMemory();
-        region.batchUpdate(b, getLockFromId(lockId));
+        region.batchUpdate(b, getLockFromId(b.getRowLock()));
       } catch (OutOfMemoryError error) {
         abort();
         LOG.fatal("Ran out of memory", error);
@@ -1164,12 +1164,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       HRegion region = getRegion(regionName);
       this.cacheFlusher.reclaimMemcacheMemory();
-      for (BatchUpdate batchUpdate : b) {
+      Integer[] locks = new Integer[b.length];
+      for (int j = 0; j < b.length; j++) {
         this.requestCount.incrementAndGet();
-        validateValuesLength(batchUpdate, region);
+        validateValuesLength(b[j], region);
+        locks[j] = getLockFromId(b[j].getRowLock());
       }
       i+= b.length-1;
-      region.batchUpdate(b);
+      region.batchUpdate(b, locks);
     } catch (OutOfMemoryError error) {
       abort();
       LOG.fatal("Ran out of memory", error);
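
getLockFromId itself is not touched by this diff; only its call sites change. A hedged sketch of what such an id-to-lock translation can look like, where the map, its name, and the error handling are purely assumptions (assumed imports: java.util.Map, java.util.concurrent.ConcurrentHashMap, java.io.IOException):

    // Hypothetical sketch, not HBase's actual implementation. Assumes the
    // region server tracks client-held row locks by their long id.
    private final Map<Long, Integer> rowLocksById =
      new ConcurrentHashMap<Long, Integer>();

    Integer getLockFromId(long lockId) throws IOException {
      if (lockId == -1L) {
        return null; // BatchUpdate's default rowLock: no client-held lock
      }
      Integer lock = rowLocksById.get(lockId);
      if (lock == null) {
        throw new IOException("Invalid row lock id: " + lockId);
      }
      return lock; // the Integer token HRegion uses as its internal row lock
    }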