HBASE-15786 Create DBB backed MSLAB pool.

This commit is contained in:
anoopsamjohn 2016-11-24 20:47:41 +05:30
parent a45b3c3533
commit 86e17858f7
48 changed files with 901 additions and 588 deletions

View File

@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.filter;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -91,9 +91,9 @@ public class ColumnPrefixFilter extends FilterBase {
} }
private static int compareQualifierPart(Cell cell, int length, byte[] prefix) { private static int compareQualifierPart(Cell cell, int length, byte[] prefix) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) cell).getQualifierByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), length, prefix, 0, length); ((ByteBufferCell) cell).getQualifierPosition(), length, prefix, 0, length);
} }
return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), length, prefix, 0, return Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), length, prefix, 0,
length); length);

View File

@ -23,7 +23,7 @@ import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -62,8 +62,8 @@ public class KeyOnlyFilter extends FilterBase {
} }
private Cell createKeyOnlyCell(Cell c) { private Cell createKeyOnlyCell(Cell c) {
if (c instanceof ByteBufferedCell) { if (c instanceof ByteBufferCell) {
return new KeyOnlyByteBufferedCell((ByteBufferedCell) c, lenAsVal); return new KeyOnlyByteBufferCell((ByteBufferCell) c, lenAsVal);
} else { } else {
return new KeyOnlyCell(c, lenAsVal); return new KeyOnlyCell(c, lenAsVal);
} }
@ -232,11 +232,11 @@ public class KeyOnlyFilter extends FilterBase {
} }
} }
static class KeyOnlyByteBufferedCell extends ByteBufferedCell { static class KeyOnlyByteBufferCell extends ByteBufferCell {
private ByteBufferedCell cell; private ByteBufferCell cell;
private boolean lenAsVal; private boolean lenAsVal;
public KeyOnlyByteBufferedCell(ByteBufferedCell c, boolean lenAsVal) { public KeyOnlyByteBufferCell(ByteBufferCell c, boolean lenAsVal) {
this.cell = c; this.cell = c;
this.lenAsVal = lenAsVal; this.lenAsVal = lenAsVal;
} }

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.filter;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
@ -62,9 +62,9 @@ public class PrefixFilter extends FilterBase {
// else return true, filter row // else return true, filter row
// if we are passed the prefix, set flag // if we are passed the prefix, set flag
int cmp; int cmp;
if (firstRowCell instanceof ByteBufferedCell) { if (firstRowCell instanceof ByteBufferCell) {
cmp = ByteBufferUtils.compareTo(((ByteBufferedCell) firstRowCell).getRowByteBuffer(), cmp = ByteBufferUtils.compareTo(((ByteBufferCell) firstRowCell).getRowByteBuffer(),
((ByteBufferedCell) firstRowCell).getRowPosition(), this.prefix.length, ((ByteBufferCell) firstRowCell).getRowPosition(), this.prefix.length,
this.prefix, 0, this.prefix.length); this.prefix, 0, this.prefix.length);
} else { } else {
cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(), cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(),

View File

@ -25,7 +25,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TestCellUtil.ByteBufferedCellImpl; import org.apache.hadoop.hbase.TestCellUtil.ByteBufferCellImpl;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -51,18 +51,18 @@ public class TestComparators {
// Row compare // Row compare
KeyValue kv = new KeyValue(r1, f, q1, v1); KeyValue kv = new KeyValue(r1, f, q1, v1);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
ByteArrayComparable comparable = new BinaryComparator(r1); ByteArrayComparable comparable = new BinaryComparator(r1);
assertEquals(0, CellComparator.compareRow(bbCell, comparable)); assertEquals(0, CellComparator.compareRow(bbCell, comparable));
assertEquals(0, CellComparator.compareRow(kv, comparable)); assertEquals(0, CellComparator.compareRow(kv, comparable));
kv = new KeyValue(r0, f, q1, v1); kv = new KeyValue(r0, f, q1, v1);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareRow(bbCell, comparable) > 0); assertTrue(CellComparator.compareRow(bbCell, comparable) > 0);
assertTrue(CellComparator.compareRow(kv, comparable) > 0); assertTrue(CellComparator.compareRow(kv, comparable) > 0);
kv = new KeyValue(r2, f, q1, v1); kv = new KeyValue(r2, f, q1, v1);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareRow(bbCell, comparable) < 0); assertTrue(CellComparator.compareRow(bbCell, comparable) < 0);
assertTrue(CellComparator.compareRow(kv, comparable) < 0); assertTrue(CellComparator.compareRow(kv, comparable) < 0);
// Qualifier compare // Qualifier compare
@ -71,12 +71,12 @@ public class TestComparators {
assertEquals(0, CellComparator.compareQualifier(kv, comparable)); assertEquals(0, CellComparator.compareQualifier(kv, comparable));
kv = new KeyValue(r2, f, q2, v1); kv = new KeyValue(r2, f, q2, v1);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareQualifier(bbCell, comparable)); assertEquals(0, CellComparator.compareQualifier(bbCell, comparable));
assertEquals(0, CellComparator.compareQualifier(kv, comparable)); assertEquals(0, CellComparator.compareQualifier(kv, comparable));
kv = new KeyValue(r2, f, q3, v1); kv = new KeyValue(r2, f, q3, v1);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareQualifier(bbCell, comparable) < 0); assertTrue(CellComparator.compareQualifier(bbCell, comparable) < 0);
assertTrue(CellComparator.compareQualifier(kv, comparable) < 0); assertTrue(CellComparator.compareQualifier(kv, comparable) < 0);
// Value compare // Value compare
@ -85,7 +85,7 @@ public class TestComparators {
assertEquals(0, CellComparator.compareValue(kv, comparable)); assertEquals(0, CellComparator.compareValue(kv, comparable));
kv = new KeyValue(r1, f, q1, v2); kv = new KeyValue(r1, f, q1, v2);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareValue(bbCell, comparable) < 0); assertTrue(CellComparator.compareValue(bbCell, comparable) < 0);
assertTrue(CellComparator.compareValue(kv, comparable) < 0); assertTrue(CellComparator.compareValue(kv, comparable) < 0);
// Family compare // Family compare

View File

@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TestCellUtil.ByteBufferedCellImpl; import org.apache.hadoop.hbase.TestCellUtil.ByteBufferCellImpl;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferedCell; import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferCell;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyCell; import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyCell;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -71,7 +71,7 @@ public class TestKeyOnlyFilter {
v.length, tags); v.length, tags);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
ByteBufferedCellImpl bbCell = new ByteBufferedCellImpl(buffer, 0, ByteBufferCellImpl bbCell = new ByteBufferCellImpl(buffer, 0,
buffer.remaining()); buffer.remaining());
// KV format: <keylen:4><valuelen:4><key:keylen><value:valuelen> // KV format: <keylen:4><valuelen:4><key:keylen><value:valuelen>
@ -89,7 +89,7 @@ public class TestKeyOnlyFilter {
KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer); KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer);
KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal); KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal);
KeyOnlyByteBufferedCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferedCell( KeyOnlyByteBufferCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferCell(
bbCell, lenAsVal); bbCell, lenAsVal);
assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell)); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell));

View File

@ -44,8 +44,30 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
* appropriate Cell access server-side: i.e. ByteBufferedCell when backed by a ByteBuffer and Cell * appropriate Cell access server-side: i.e. ByteBufferedCell when backed by a ByteBuffer and Cell
* when it is not. * when it is not.
*/ */
/*
* Even though all the methods are abstract, ByteBufferCell is not made to be an interface with
* intent. In CellComparator compare method, we have instance of check to decide whether to use
* getXXXArray() or getXXXByteBuffer(). This is a very hot method in read and write paths.
* if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
....
}
if (left instanceof ByteBufferCell) {
....
}
if (right instanceof ByteBufferCell) {
....
}
return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
right.getRowArray(), right.getRowOffset(), right.getRowLength());
* We did JMH micro benchmark tests with both left and right cells as ByteBufferCell, one only
* ByteBufferCell and both as Cells. This is compared against JMH results on compare logic with out
* any instance of checks. We noticed that if ByteBufferCell is an interface, the benchmark result
* seems to be very bad for case of both right and left are Cell only (Not ByteBufferCell). When
* ByteBufferCell is an abstract class all 4 possible cases giving almost similar performance number
* compared with compare logic with no instance of checks.
*/
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class ByteBufferedCell implements Cell { public abstract class ByteBufferCell implements Cell {
/** /**
* @return The {@link ByteBuffer} containing the row bytes. * @return The {@link ByteBuffer} containing the row bytes.
*/ */

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* (onheap and offheap). * (onheap and offheap).
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class ByteBufferedKeyOnlyKeyValue extends ByteBufferedCell { public class ByteBufferKeyOnlyKeyValue extends ByteBufferCell {
private ByteBuffer buf; private ByteBuffer buf;
private int offset = 0; // offset into buffer where key starts at private int offset = 0; // offset into buffer where key starts at
@ -41,10 +41,10 @@ public class ByteBufferedKeyOnlyKeyValue extends ByteBufferedCell {
* Used in cases where we want to avoid lot of garbage by allocating new objects with different * Used in cases where we want to avoid lot of garbage by allocating new objects with different
* keys. Use the emtpy construtor and set the keys using {@link #setKey(ByteBuffer, int, int)} * keys. Use the emtpy construtor and set the keys using {@link #setKey(ByteBuffer, int, int)}
*/ */
public ByteBufferedKeyOnlyKeyValue() { public ByteBufferKeyOnlyKeyValue() {
} }
public ByteBufferedKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) { public ByteBufferKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) {
setKey(buf, offset, length); setKey(buf, offset, length);
} }

View File

@ -159,35 +159,35 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/ */
public final static int compareFamilies(Cell left, Cell right) { public final static int compareFamilies(Cell left, Cell right) {
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), left.getFamilyLength(), ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
((ByteBufferedCell) right).getFamilyByteBuffer(), ((ByteBufferCell) right).getFamilyByteBuffer(),
((ByteBufferedCell) right).getFamilyPosition(), right.getFamilyLength()); ((ByteBufferCell) right).getFamilyPosition(), right.getFamilyLength());
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), left.getFamilyLength(), ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
// Notice how we flip the order of the compare here. We used to negate the return value but // Notice how we flip the order of the compare here. We used to negate the return value but
// see what FindBugs says // see what FindBugs says
// http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
// It suggest flipping the order to get same effect and 'safer'. // It suggest flipping the order to get same effect and 'safer'.
return ByteBufferUtils.compareTo( return ByteBufferUtils.compareTo(
left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
((ByteBufferedCell)right).getFamilyByteBuffer(), ((ByteBufferCell)right).getFamilyByteBuffer(),
((ByteBufferedCell)right).getFamilyPosition(), right.getFamilyLength()); ((ByteBufferCell)right).getFamilyPosition(), right.getFamilyLength());
} }
return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength()); right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
} }
private final static int compareFamilies(Cell left, byte[] right, int roffset, int rlength) { private final static int compareFamilies(Cell left, byte[] right, int roffset, int rlength) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), left.getFamilyLength(), right, ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(), right,
roffset, rlength); roffset, rlength);
} }
return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
@ -201,28 +201,28 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/ */
public final static int compareQualifiers(Cell left, Cell right) { public final static int compareQualifiers(Cell left, Cell right) {
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils return ByteBufferUtils
.compareTo(((ByteBufferedCell) left).getQualifierByteBuffer(), .compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), ((ByteBufferCell) left).getQualifierPosition(),
left.getQualifierLength(), ((ByteBufferedCell) right).getQualifierByteBuffer(), left.getQualifierLength(), ((ByteBufferCell) right).getQualifierByteBuffer(),
((ByteBufferedCell) right).getQualifierPosition(), ((ByteBufferCell) right).getQualifierPosition(),
right.getQualifierLength()); right.getQualifierLength());
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getQualifierByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), left.getQualifierLength(), ((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength()); right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength());
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
// Notice how we flip the order of the compare here. We used to negate the return value but // Notice how we flip the order of the compare here. We used to negate the return value but
// see what FindBugs says // see what FindBugs says
// http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
// It suggest flipping the order to get same effect and 'safer'. // It suggest flipping the order to get same effect and 'safer'.
return ByteBufferUtils.compareTo(left.getQualifierArray(), return ByteBufferUtils.compareTo(left.getQualifierArray(),
left.getQualifierOffset(), left.getQualifierLength(), left.getQualifierOffset(), left.getQualifierLength(),
((ByteBufferedCell)right).getQualifierByteBuffer(), ((ByteBufferCell)right).getQualifierByteBuffer(),
((ByteBufferedCell)right).getQualifierPosition(), right.getQualifierLength()); ((ByteBufferCell)right).getQualifierPosition(), right.getQualifierLength());
} }
return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(), left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(),
@ -230,9 +230,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
} }
public final static int compareQualifiers(Cell left, byte[] right, int rOffset, int rLength) { public final static int compareQualifiers(Cell left, byte[] right, int rOffset, int rLength) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getQualifierByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), left.getQualifierLength(), ((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
right, rOffset, rLength); right, rOffset, rLength);
} }
return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(), return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
@ -328,25 +328,25 @@ public class CellComparator implements Comparator<Cell>, Serializable {
if (left == right) { if (left == right) {
return 0; return 0;
} }
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), left.getRowLength(), ((ByteBufferCell) left).getRowPosition(), left.getRowLength(),
((ByteBufferedCell) right).getRowByteBuffer(), ((ByteBufferCell) right).getRowByteBuffer(),
((ByteBufferedCell) right).getRowPosition(), right.getRowLength()); ((ByteBufferCell) right).getRowPosition(), right.getRowLength());
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), left.getRowLength(), ((ByteBufferCell) left).getRowPosition(), left.getRowLength(),
right.getRowArray(), right.getRowOffset(), right.getRowLength()); right.getRowArray(), right.getRowOffset(), right.getRowLength());
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
// Notice how we flip the order of the compare here. We used to negate the return value but // Notice how we flip the order of the compare here. We used to negate the return value but
// see what FindBugs says // see what FindBugs says
// http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
// It suggest flipping the order to get same effect and 'safer'. // It suggest flipping the order to get same effect and 'safer'.
return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
((ByteBufferedCell)right).getRowByteBuffer(), ((ByteBufferCell)right).getRowByteBuffer(),
((ByteBufferedCell)right).getRowPosition(), right.getRowLength()); ((ByteBufferCell)right).getRowPosition(), right.getRowLength());
} }
return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
right.getRowArray(), right.getRowOffset(), right.getRowLength()); right.getRowArray(), right.getRowOffset(), right.getRowLength());
@ -369,9 +369,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* than byte[], -1 otherwise * than byte[], -1 otherwise
*/ */
public int compareRows(Cell left, byte[] right, int roffset, int rlength) { public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), left.getRowLength(), right, ((ByteBufferCell) left).getRowPosition(), left.getRowLength(), right,
roffset, rlength); roffset, rlength);
} }
return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right, return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right,
@ -521,9 +521,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return result comparing cell's row * @return result comparing cell's row
*/ */
public static int compareRow(Cell cell, ByteArrayComparable comparator) { public static int compareRow(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return comparator.compareTo(((ByteBufferedCell) cell).getRowByteBuffer(), return comparator.compareTo(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength()); ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength());
} }
return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} }
@ -535,9 +535,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return result comparing cell's column family * @return result comparing cell's column family
*/ */
public static int compareFamily(Cell cell, ByteArrayComparable comparator) { public static int compareFamily(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return comparator.compareTo(((ByteBufferedCell) cell).getFamilyByteBuffer(), return comparator.compareTo(((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), cell.getFamilyLength()); ((ByteBufferCell) cell).getFamilyPosition(), cell.getFamilyLength());
} }
return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength()); cell.getFamilyLength());
@ -550,9 +550,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return result comparing cell's qualifier * @return result comparing cell's qualifier
*/ */
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) { public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return comparator.compareTo(((ByteBufferedCell) cell).getQualifierByteBuffer(), return comparator.compareTo(((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), cell.getQualifierLength()); ((ByteBufferCell) cell).getQualifierPosition(), cell.getQualifierLength());
} }
return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()); cell.getQualifierLength());
@ -565,9 +565,9 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* @return result comparing cell's value * @return result comparing cell's value
*/ */
public static int compareValue(Cell cell, ByteArrayComparable comparator) { public static int compareValue(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return comparator.compareTo(((ByteBufferedCell) cell).getValueByteBuffer(), return comparator.compareTo(((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition(), cell.getValueLength()); ((ByteBufferCell) cell).getValuePosition(), cell.getValueLength());
} }
return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} }

View File

@ -131,10 +131,10 @@ public final class CellUtil {
public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) { public static int copyRowTo(Cell cell, byte[] destination, int destinationOffset) {
short rowLen = cell.getRowLength(); short rowLen = cell.getRowLength();
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToArray(destination, ByteBufferUtils.copyFromBufferToArray(destination,
((ByteBufferedCell) cell).getRowByteBuffer(), ((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), destinationOffset, rowLen); ((ByteBufferCell) cell).getRowPosition(), destinationOffset, rowLen);
} else { } else {
System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset, System.arraycopy(cell.getRowArray(), cell.getRowOffset(), destination, destinationOffset,
rowLen); rowLen);
@ -142,16 +142,28 @@ public final class CellUtil {
return destinationOffset + rowLen; return destinationOffset + rowLen;
} }
public static int copyRowTo(Cell cell, ByteBuffer destination, int destinationOffset) {
short rowLen = cell.getRowLength();
if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferCell) cell).getRowByteBuffer(),
destination, ((ByteBufferCell) cell).getRowPosition(), destinationOffset, rowLen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getRowArray(),
cell.getRowOffset(), rowLen);
}
return destinationOffset + rowLen;
}
/** /**
* Copies the row to a new byte[] * Copies the row to a new byte[]
* @param cell the cell from which row has to copied * @param cell the cell from which row has to copied
* @return the byte[] containing the row * @return the byte[] containing the row
*/ */
public static byte[] copyRow(Cell cell) { public static byte[] copyRow(Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.copyOfRange(((ByteBufferedCell) cell).getRowByteBuffer(), return ByteBufferUtils.copyOfRange(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), ((ByteBufferCell) cell).getRowPosition(),
((ByteBufferedCell) cell).getRowPosition() + cell.getRowLength()); ((ByteBufferCell) cell).getRowPosition() + cell.getRowLength());
} else { } else {
return Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(), return Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(),
cell.getRowOffset() + cell.getRowLength()); cell.getRowOffset() + cell.getRowLength());
@ -160,10 +172,10 @@ public final class CellUtil {
public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) { public static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset) {
byte fLen = cell.getFamilyLength(); byte fLen = cell.getFamilyLength();
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToArray(destination, ByteBufferUtils.copyFromBufferToArray(destination,
((ByteBufferedCell) cell).getFamilyByteBuffer(), ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), destinationOffset, fLen); ((ByteBufferCell) cell).getFamilyPosition(), destinationOffset, fLen);
} else { } else {
System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination, System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(), destination,
destinationOffset, fLen); destinationOffset, fLen);
@ -171,12 +183,24 @@ public final class CellUtil {
return destinationOffset + fLen; return destinationOffset + fLen;
} }
/**
 * Copies the family bytes of {@code cell} into the destination {@link ByteBuffer}.
 * @param cell cell whose family is copied
 * @param destination buffer that receives the family bytes
 * @param destinationOffset position in {@code destination} at which to start writing
 * @return the position in {@code destination} just past the copied family
 */
public static int copyFamilyTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  byte fLen = cell.getFamilyLength();
  if (cell instanceof ByteBufferCell) {
    // Buffer-backed cell: copy directly between the two buffers.
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    ByteBufferUtils.copyFromBufferToBuffer(bbCell.getFamilyByteBuffer(), destination,
        bbCell.getFamilyPosition(), destinationOffset, fLen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getFamilyArray(),
        cell.getFamilyOffset(), fLen);
  }
  return destinationOffset + fLen;
}
public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) { public static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset) {
int qlen = cell.getQualifierLength(); int qlen = cell.getQualifierLength();
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToArray(destination, ByteBufferUtils.copyFromBufferToArray(destination,
((ByteBufferedCell) cell).getQualifierByteBuffer(), ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), destinationOffset, qlen); ((ByteBufferCell) cell).getQualifierPosition(), destinationOffset, qlen);
} else { } else {
System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination, System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), destination,
destinationOffset, qlen); destinationOffset, qlen);
@ -184,12 +208,24 @@ public final class CellUtil {
return destinationOffset + qlen; return destinationOffset + qlen;
} }
/**
 * Copies the qualifier bytes of {@code cell} into the destination {@link ByteBuffer}.
 * @param cell cell whose qualifier is copied
 * @param destination buffer that receives the qualifier bytes
 * @param destinationOffset position in {@code destination} at which to start writing
 * @return the position in {@code destination} just past the copied qualifier
 */
public static int copyQualifierTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int qlen = cell.getQualifierLength();
  if (cell instanceof ByteBufferCell) {
    // Buffer-backed cell: copy directly between the two buffers.
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    ByteBufferUtils.copyFromBufferToBuffer(bbCell.getQualifierByteBuffer(), destination,
        bbCell.getQualifierPosition(), destinationOffset, qlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset,
        cell.getQualifierArray(), cell.getQualifierOffset(), qlen);
  }
  return destinationOffset + qlen;
}
public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) { public static int copyValueTo(Cell cell, byte[] destination, int destinationOffset) {
int vlen = cell.getValueLength(); int vlen = cell.getValueLength();
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToArray(destination, ByteBufferUtils.copyFromBufferToArray(destination,
((ByteBufferedCell) cell).getValueByteBuffer(), ((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition(), destinationOffset, vlen); ((ByteBufferCell) cell).getValuePosition(), destinationOffset, vlen);
} else { } else {
System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset, System.arraycopy(cell.getValueArray(), cell.getValueOffset(), destination, destinationOffset,
vlen); vlen);
@ -197,6 +233,18 @@ public final class CellUtil {
return destinationOffset + vlen; return destinationOffset + vlen;
} }
/**
 * Copies the value bytes of {@code cell} into the destination {@link ByteBuffer}.
 * @param cell cell whose value is copied
 * @param destination buffer that receives the value bytes
 * @param destinationOffset position in {@code destination} at which to start writing
 * @return the position in {@code destination} just past the copied value
 */
public static int copyValueTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int vlen = cell.getValueLength();
  if (cell instanceof ByteBufferCell) {
    // Buffer-backed cell: copy directly between the two buffers.
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    ByteBufferUtils.copyFromBufferToBuffer(bbCell.getValueByteBuffer(), destination,
        bbCell.getValuePosition(), destinationOffset, vlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getValueArray(),
        cell.getValueOffset(), vlen);
  }
  return destinationOffset + vlen;
}
/** /**
* Copies the tags info into the tag portion of the cell * Copies the tags info into the tag portion of the cell
* @param cell * @param cell
@ -206,10 +254,10 @@ public final class CellUtil {
*/ */
public static int copyTagTo(Cell cell, byte[] destination, int destinationOffset) { public static int copyTagTo(Cell cell, byte[] destination, int destinationOffset) {
int tlen = cell.getTagsLength(); int tlen = cell.getTagsLength();
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyFromBufferToArray(destination, ByteBufferUtils.copyFromBufferToArray(destination,
((ByteBufferedCell) cell).getTagsByteBuffer(), ((ByteBufferCell) cell).getTagsByteBuffer(),
((ByteBufferedCell) cell).getTagsPosition(), destinationOffset, tlen); ((ByteBufferCell) cell).getTagsPosition(), destinationOffset, tlen);
} else { } else {
System.arraycopy(cell.getTagsArray(), cell.getTagsOffset(), destination, destinationOffset, System.arraycopy(cell.getTagsArray(), cell.getTagsOffset(), destination, destinationOffset,
tlen); tlen);
@ -217,22 +265,34 @@ public final class CellUtil {
return destinationOffset + tlen; return destinationOffset + tlen;
} }
/**
 * Copies the tags bytes of {@code cell} into the destination {@link ByteBuffer}.
 * @param cell cell whose tags are copied
 * @param destination buffer that receives the tags bytes
 * @param destinationOffset position in {@code destination} at which to start writing
 * @return the position in {@code destination} just past the copied tags
 */
public static int copyTagTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int tlen = cell.getTagsLength();
  if (cell instanceof ByteBufferCell) {
    // Buffer-backed cell: copy directly between the two buffers.
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    ByteBufferUtils.copyFromBufferToBuffer(bbCell.getTagsByteBuffer(), destination,
        bbCell.getTagsPosition(), destinationOffset, tlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getTagsArray(),
        cell.getTagsOffset(), tlen);
  }
  return destinationOffset + tlen;
}
/********************* misc *************************************/ /********************* misc *************************************/
@Private @Private
public static byte getRowByte(Cell cell, int index) { public static byte getRowByte(Cell cell, int index) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ((ByteBufferedCell) cell).getRowByteBuffer().get( return ((ByteBufferCell) cell).getRowByteBuffer().get(
((ByteBufferedCell) cell).getRowPosition() + index); ((ByteBufferCell) cell).getRowPosition() + index);
} }
return cell.getRowArray()[cell.getRowOffset() + index]; return cell.getRowArray()[cell.getRowOffset() + index];
} }
@Private @Private
public static byte getQualifierByte(Cell cell, int index) { public static byte getQualifierByte(Cell cell, int index) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ((ByteBufferedCell) cell).getQualifierByteBuffer().get( return ((ByteBufferCell) cell).getQualifierByteBuffer().get(
((ByteBufferedCell) cell).getQualifierPosition() + index); ((ByteBufferCell) cell).getQualifierPosition() + index);
} }
return cell.getQualifierArray()[cell.getQualifierOffset() + index]; return cell.getQualifierArray()[cell.getQualifierOffset() + index];
} }
@ -546,12 +606,12 @@ public final class CellUtil {
} }
@Override @Override
public void write(byte[] buf, int offset) { public void write(ByteBuffer buf, int offset) {
offset = KeyValueUtil.appendToByteArray(this.cell, buf, offset, false); offset = KeyValueUtil.appendToByteBuffer(this.cell, buf, offset, false);
int tagsLen = this.tags.length; int tagsLen = this.tags.length;
assert tagsLen > 0; assert tagsLen > 0;
offset = Bytes.putAsShort(buf, offset, tagsLen); offset = ByteBufferUtils.putAsShort(buf, offset, tagsLen);
System.arraycopy(this.tags, 0, buf, offset, tagsLen); ByteBufferUtils.copyFromArrayToBuffer(buf, offset, this.tags, 0, tagsLen);
} }
@Override @Override
@ -720,9 +780,9 @@ public final class CellUtil {
public static boolean matchingRow(final Cell left, final byte[] buf, final int offset, public static boolean matchingRow(final Cell left, final byte[] buf, final int offset,
final int length) { final int length) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), left.getRowLength(), buf, offset, ((ByteBufferCell) left).getRowPosition(), left.getRowLength(), buf, offset,
length); length);
} }
return Bytes.equals(left.getRowArray(), left.getRowOffset(), left.getRowLength(), buf, offset, return Bytes.equals(left.getRowArray(), left.getRowOffset(), left.getRowLength(), buf, offset,
@ -732,20 +792,20 @@ public final class CellUtil {
public static boolean matchingFamily(final Cell left, final Cell right) { public static boolean matchingFamily(final Cell left, final Cell right) {
byte lfamlength = left.getFamilyLength(); byte lfamlength = left.getFamilyLength();
byte rfamlength = right.getFamilyLength(); byte rfamlength = right.getFamilyLength();
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), lfamlength, ((ByteBufferCell) left).getFamilyPosition(), lfamlength,
((ByteBufferedCell) right).getFamilyByteBuffer(), ((ByteBufferCell) right).getFamilyByteBuffer(),
((ByteBufferedCell) right).getFamilyPosition(), rfamlength); ((ByteBufferCell) right).getFamilyPosition(), rfamlength);
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), lfamlength, ((ByteBufferCell) left).getFamilyPosition(), lfamlength,
right.getFamilyArray(), right.getFamilyOffset(), rfamlength); right.getFamilyArray(), right.getFamilyOffset(), rfamlength);
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) right).getFamilyByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) right).getFamilyByteBuffer(),
((ByteBufferedCell) right).getFamilyPosition(), rfamlength, ((ByteBufferCell) right).getFamilyPosition(), rfamlength,
left.getFamilyArray(), left.getFamilyOffset(), lfamlength); left.getFamilyArray(), left.getFamilyOffset(), lfamlength);
} }
return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), lfamlength, return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), lfamlength,
@ -761,9 +821,9 @@ public final class CellUtil {
public static boolean matchingFamily(final Cell left, final byte[] buf, final int offset, public static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
final int length) { final int length) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getFamilyByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferedCell) left).getFamilyPosition(), left.getFamilyLength(), buf, ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(), buf,
offset, length); offset, length);
} }
return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), buf, return Bytes.equals(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), buf,
@ -773,20 +833,20 @@ public final class CellUtil {
public static boolean matchingQualifier(final Cell left, final Cell right) { public static boolean matchingQualifier(final Cell left, final Cell right) {
int lqlength = left.getQualifierLength(); int lqlength = left.getQualifierLength();
int rqlength = right.getQualifierLength(); int rqlength = right.getQualifierLength();
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getQualifierByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), lqlength, ((ByteBufferCell) left).getQualifierPosition(), lqlength,
((ByteBufferedCell) right).getQualifierByteBuffer(), ((ByteBufferCell) right).getQualifierByteBuffer(),
((ByteBufferedCell) right).getQualifierPosition(), rqlength); ((ByteBufferCell) right).getQualifierPosition(), rqlength);
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getQualifierByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), lqlength, ((ByteBufferCell) left).getQualifierPosition(), lqlength,
right.getQualifierArray(), right.getQualifierOffset(), rqlength); right.getQualifierArray(), right.getQualifierOffset(), rqlength);
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) right).getQualifierByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) right).getQualifierByteBuffer(),
((ByteBufferedCell) right).getQualifierPosition(), rqlength, ((ByteBufferCell) right).getQualifierPosition(), rqlength,
left.getQualifierArray(), left.getQualifierOffset(), lqlength); left.getQualifierArray(), left.getQualifierOffset(), lqlength);
} }
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(),
@ -822,9 +882,9 @@ public final class CellUtil {
if (buf == null) { if (buf == null) {
return left.getQualifierLength() == 0; return left.getQualifierLength() == 0;
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getQualifierByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferedCell) left).getQualifierPosition(), left.getQualifierLength(), ((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
buf, offset, length); buf, offset, length);
} }
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(),
@ -856,20 +916,20 @@ public final class CellUtil {
public static boolean matchingValue(final Cell left, final Cell right, int lvlength, public static boolean matchingValue(final Cell left, final Cell right, int lvlength,
int rvlength) { int rvlength) {
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getValueByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getValueByteBuffer(),
((ByteBufferedCell) left).getValuePosition(), lvlength, ((ByteBufferCell) left).getValuePosition(), lvlength,
((ByteBufferedCell) right).getValueByteBuffer(), ((ByteBufferCell) right).getValueByteBuffer(),
((ByteBufferedCell) right).getValuePosition(), rvlength); ((ByteBufferCell) right).getValuePosition(), rvlength);
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getValueByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getValueByteBuffer(),
((ByteBufferedCell) left).getValuePosition(), lvlength, right.getValueArray(), ((ByteBufferCell) left).getValuePosition(), lvlength, right.getValueArray(),
right.getValueOffset(), rvlength); right.getValueOffset(), rvlength);
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) right).getValueByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) right).getValueByteBuffer(),
((ByteBufferedCell) right).getValuePosition(), rvlength, left.getValueArray(), ((ByteBufferCell) right).getValuePosition(), rvlength, left.getValueArray(),
left.getValueOffset(), lvlength); left.getValueOffset(), lvlength);
} }
return Bytes.equals(left.getValueArray(), left.getValueOffset(), lvlength, return Bytes.equals(left.getValueArray(), left.getValueOffset(), lvlength,
@ -877,9 +937,9 @@ public final class CellUtil {
} }
public static boolean matchingValue(final Cell left, final byte[] buf) { public static boolean matchingValue(final Cell left, final byte[] buf) {
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferedCell) left).getValueByteBuffer(), return ByteBufferUtils.compareTo(((ByteBufferCell) left).getValueByteBuffer(),
((ByteBufferedCell) left).getValuePosition(), left.getValueLength(), buf, 0, ((ByteBufferCell) left).getValuePosition(), left.getValueLength(), buf, 0,
buf.length) == 0; buf.length) == 0;
} }
return Bytes.equals(left.getValueArray(), left.getValueOffset(), left.getValueLength(), buf, 0, return Bytes.equals(left.getValueArray(), left.getValueOffset(), left.getValueLength(), buf, 0,
@ -1105,9 +1165,9 @@ public final class CellUtil {
if (tagsLength == 0) { if (tagsLength == 0) {
return TagUtil.EMPTY_TAGS_ITR; return TagUtil.EMPTY_TAGS_ITR;
} }
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return tagsIterator(((ByteBufferedCell) cell).getTagsByteBuffer(), return tagsIterator(((ByteBufferCell) cell).getTagsByteBuffer(),
((ByteBufferedCell) cell).getTagsPosition(), tagsLength); ((ByteBufferCell) cell).getTagsPosition(), tagsLength);
} }
return tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), tagsLength); return tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
} }
@ -1133,14 +1193,14 @@ public final class CellUtil {
* @return null if there is no tag of the passed in tag type * @return null if there is no tag of the passed in tag type
*/ */
public static Tag getTag(Cell cell, byte type){ public static Tag getTag(Cell cell, byte type){
boolean bufferBacked = cell instanceof ByteBufferedCell; boolean bufferBacked = cell instanceof ByteBufferCell;
int length = cell.getTagsLength(); int length = cell.getTagsLength();
int offset = bufferBacked? ((ByteBufferedCell)cell).getTagsPosition():cell.getTagsOffset(); int offset = bufferBacked? ((ByteBufferCell)cell).getTagsPosition():cell.getTagsOffset();
int pos = offset; int pos = offset;
while (pos < offset + length) { while (pos < offset + length) {
int tagLen; int tagLen;
if (bufferBacked) { if (bufferBacked) {
ByteBuffer tagsBuffer = ((ByteBufferedCell)cell).getTagsByteBuffer(); ByteBuffer tagsBuffer = ((ByteBufferCell)cell).getTagsByteBuffer();
tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE); tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) { if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
return new OffheapTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE); return new OffheapTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE);
@ -1264,15 +1324,15 @@ public final class CellUtil {
int qLen = cell.getQualifierLength(); int qLen = cell.getQualifierLength();
// Using just one if/else loop instead of every time checking before writing every // Using just one if/else loop instead of every time checking before writing every
// component of cell // component of cell
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
out.writeShort(rowLen); out.writeShort(rowLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getRowByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), rowLen); ((ByteBufferCell) cell).getRowPosition(), rowLen);
out.writeByte(fLen); out.writeByte(fLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getFamilyByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), fLen); ((ByteBufferCell) cell).getFamilyPosition(), fLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getQualifierByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), qLen); ((ByteBufferCell) cell).getQualifierPosition(), qLen);
} else { } else {
out.writeShort(rowLen); out.writeShort(rowLen);
out.write(cell.getRowArray(), cell.getRowOffset(), rowLen); out.write(cell.getRowArray(), cell.getRowOffset(), rowLen);
@ -1292,9 +1352,9 @@ public final class CellUtil {
* @throws IOException * @throws IOException
*/ */
public static void writeRow(DataOutputStream out, Cell cell, short rlength) throws IOException { public static void writeRow(DataOutputStream out, Cell cell, short rlength) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getRowByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), rlength); ((ByteBufferCell) cell).getRowPosition(), rlength);
} else { } else {
out.write(cell.getRowArray(), cell.getRowOffset(), rlength); out.write(cell.getRowArray(), cell.getRowOffset(), rlength);
} }
@ -1309,9 +1369,9 @@ public final class CellUtil {
*/ */
public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength, public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
int commonPrefix) throws IOException { int commonPrefix) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getRowByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition() + commonPrefix, rlength - commonPrefix); ((ByteBufferCell) cell).getRowPosition() + commonPrefix, rlength - commonPrefix);
} else { } else {
out.write(cell.getRowArray(), cell.getRowOffset() + commonPrefix, rlength - commonPrefix); out.write(cell.getRowArray(), cell.getRowOffset() + commonPrefix, rlength - commonPrefix);
} }
@ -1325,9 +1385,9 @@ public final class CellUtil {
* @throws IOException * @throws IOException
*/ */
public static void writeFamily(DataOutputStream out, Cell cell, byte flength) throws IOException { public static void writeFamily(DataOutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getFamilyByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), flength); ((ByteBufferCell) cell).getFamilyPosition(), flength);
} else { } else {
out.write(cell.getFamilyArray(), cell.getFamilyOffset(), flength); out.write(cell.getFamilyArray(), cell.getFamilyOffset(), flength);
} }
@ -1342,9 +1402,9 @@ public final class CellUtil {
*/ */
public static void writeQualifier(DataOutputStream out, Cell cell, int qlength) public static void writeQualifier(DataOutputStream out, Cell cell, int qlength)
throws IOException { throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getQualifierByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), qlength); ((ByteBufferCell) cell).getQualifierPosition(), qlength);
} else { } else {
out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qlength); out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qlength);
} }
@ -1359,9 +1419,9 @@ public final class CellUtil {
*/ */
public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell,
int qlength, int commonPrefix) throws IOException { int qlength, int commonPrefix) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getQualifierByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition() + commonPrefix, qlength - commonPrefix); ((ByteBufferCell) cell).getQualifierPosition() + commonPrefix, qlength - commonPrefix);
} else { } else {
out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonPrefix, out.write(cell.getQualifierArray(), cell.getQualifierOffset() + commonPrefix,
qlength - commonPrefix); qlength - commonPrefix);
@ -1376,9 +1436,9 @@ public final class CellUtil {
* @throws IOException * @throws IOException
*/ */
public static void writeValue(DataOutputStream out, Cell cell, int vlength) throws IOException { public static void writeValue(DataOutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getValueByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition(), vlength); ((ByteBufferCell) cell).getValuePosition(), vlength);
} else { } else {
out.write(cell.getValueArray(), cell.getValueOffset(), vlength); out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
} }
@ -1392,9 +1452,9 @@ public final class CellUtil {
* @throws IOException * @throws IOException
*/ */
public static void writeTags(DataOutputStream out, Cell cell, int tagsLength) throws IOException { public static void writeTags(DataOutputStream out, Cell cell, int tagsLength) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferedCell) cell).getTagsByteBuffer(), ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) cell).getTagsByteBuffer(),
((ByteBufferedCell) cell).getTagsPosition(), tagsLength); ((ByteBufferCell) cell).getTagsPosition(), tagsLength);
} else { } else {
out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength); out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
} }
@ -1494,10 +1554,10 @@ public final class CellUtil {
} }
// Compare the RKs // Compare the RKs
int rkCommonPrefix = 0; int rkCommonPrefix = 0;
if (c1 instanceof ByteBufferedCell && c2 instanceof ByteBufferedCell) { if (c1 instanceof ByteBufferCell && c2 instanceof ByteBufferCell) {
rkCommonPrefix = ByteBufferUtils.findCommonPrefix(((ByteBufferedCell) c1).getRowByteBuffer(), rkCommonPrefix = ByteBufferUtils.findCommonPrefix(((ByteBufferCell) c1).getRowByteBuffer(),
((ByteBufferedCell) c1).getRowPosition(), rLen1, ((ByteBufferedCell) c2).getRowByteBuffer(), ((ByteBufferCell) c1).getRowPosition(), rLen1, ((ByteBufferCell) c2).getRowByteBuffer(),
((ByteBufferedCell) c2).getRowPosition(), rLen2); ((ByteBufferCell) c2).getRowPosition(), rLen2);
} else { } else {
// There cannot be a case where one cell is BBCell and other is KeyValue. This flow comes either // There cannot be a case where one cell is BBCell and other is KeyValue. This flow comes either
// in flush or compactions. In flushes both cells are KV and in case of compaction it will be either // in flush or compactions. In flushes both cells are KV and in case of compaction it will be either
@ -1526,12 +1586,12 @@ public final class CellUtil {
commonPrefix += KeyValue.FAMILY_LENGTH_SIZE; commonPrefix += KeyValue.FAMILY_LENGTH_SIZE;
// Compare the CF names // Compare the CF names
int fCommonPrefix; int fCommonPrefix;
if (c1 instanceof ByteBufferedCell && c2 instanceof ByteBufferedCell) { if (c1 instanceof ByteBufferCell && c2 instanceof ByteBufferCell) {
fCommonPrefix = fCommonPrefix =
ByteBufferUtils.findCommonPrefix(((ByteBufferedCell) c1).getFamilyByteBuffer(), ByteBufferUtils.findCommonPrefix(((ByteBufferCell) c1).getFamilyByteBuffer(),
((ByteBufferedCell) c1).getFamilyPosition(), fLen1, ((ByteBufferCell) c1).getFamilyPosition(), fLen1,
((ByteBufferedCell) c2).getFamilyByteBuffer(), ((ByteBufferCell) c2).getFamilyByteBuffer(),
((ByteBufferedCell) c2).getFamilyPosition(), fLen2); ((ByteBufferCell) c2).getFamilyPosition(), fLen2);
} else { } else {
fCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getFamilyArray(), c1.getFamilyOffset(), fCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getFamilyArray(), c1.getFamilyOffset(),
fLen1, c2.getFamilyArray(), c2.getFamilyOffset(), fLen2); fLen1, c2.getFamilyArray(), c2.getFamilyOffset(), fLen2);
@ -1545,11 +1605,11 @@ public final class CellUtil {
int qLen1 = c1.getQualifierLength(); int qLen1 = c1.getQualifierLength();
int qLen2 = c2.getQualifierLength(); int qLen2 = c2.getQualifierLength();
int qCommon; int qCommon;
if (c1 instanceof ByteBufferedCell && c2 instanceof ByteBufferedCell) { if (c1 instanceof ByteBufferCell && c2 instanceof ByteBufferCell) {
qCommon = ByteBufferUtils.findCommonPrefix(((ByteBufferedCell) c1).getQualifierByteBuffer(), qCommon = ByteBufferUtils.findCommonPrefix(((ByteBufferCell) c1).getQualifierByteBuffer(),
((ByteBufferedCell) c1).getQualifierPosition(), qLen1, ((ByteBufferCell) c1).getQualifierPosition(), qLen1,
((ByteBufferedCell) c2).getQualifierByteBuffer(), ((ByteBufferCell) c2).getQualifierByteBuffer(),
((ByteBufferedCell) c2).getQualifierPosition(), qLen2); ((ByteBufferCell) c2).getQualifierPosition(), qLen2);
} else { } else {
qCommon = ByteBufferUtils.findCommonPrefix(c1.getQualifierArray(), c1.getQualifierOffset(), qCommon = ByteBufferUtils.findCommonPrefix(c1.getQualifierArray(), c1.getQualifierOffset(),
qLen1, c2.getQualifierArray(), c2.getQualifierOffset(), qLen2); qLen1, c2.getQualifierArray(), c2.getQualifierOffset(), qLen2);
@ -1658,20 +1718,20 @@ public final class CellUtil {
short lrowlength = left.getRowLength(); short lrowlength = left.getRowLength();
short rrowlength = right.getRowLength(); short rrowlength = right.getRowLength();
if (lrowlength != rrowlength) return false; if (lrowlength != rrowlength) return false;
if (left instanceof ByteBufferedCell && right instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), lrowlength, ((ByteBufferCell) left).getRowPosition(), lrowlength,
((ByteBufferedCell) right).getRowByteBuffer(), ((ByteBufferCell) right).getRowByteBuffer(),
((ByteBufferedCell) right).getRowPosition(), rrowlength); ((ByteBufferCell) right).getRowPosition(), rrowlength);
} }
if (left instanceof ByteBufferedCell) { if (left instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) left).getRowByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) left).getRowByteBuffer(),
((ByteBufferedCell) left).getRowPosition(), lrowlength, right.getRowArray(), ((ByteBufferCell) left).getRowPosition(), lrowlength, right.getRowArray(),
right.getRowOffset(), rrowlength); right.getRowOffset(), rrowlength);
} }
if (right instanceof ByteBufferedCell) { if (right instanceof ByteBufferCell) {
return ByteBufferUtils.equals(((ByteBufferedCell) right).getRowByteBuffer(), return ByteBufferUtils.equals(((ByteBufferCell) right).getRowByteBuffer(),
((ByteBufferedCell) right).getRowPosition(), rrowlength, left.getRowArray(), ((ByteBufferCell) right).getRowPosition(), rrowlength, left.getRowArray(),
left.getRowOffset(), lrowlength); left.getRowOffset(), lrowlength);
} }
return Bytes.equals(left.getRowArray(), left.getRowOffset(), lrowlength, return Bytes.equals(left.getRowArray(), left.getRowOffset(), lrowlength,
@ -1704,9 +1764,9 @@ public final class CellUtil {
* @return rowkey as int * @return rowkey as int
*/ */
public static int getRowAsInt(Cell cell) { public static int getRowAsInt(Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.toInt(((ByteBufferedCell) cell).getRowByteBuffer(), return ByteBufferUtils.toInt(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition()); ((ByteBufferCell) cell).getRowPosition());
} }
return Bytes.toInt(cell.getRowArray(), cell.getRowOffset()); return Bytes.toInt(cell.getRowArray(), cell.getRowOffset());
} }
@ -1718,9 +1778,9 @@ public final class CellUtil {
* @return value as long * @return value as long
*/ */
public static long getValueAsLong(Cell cell) { public static long getValueAsLong(Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.toLong(((ByteBufferedCell) cell).getValueByteBuffer(), return ByteBufferUtils.toLong(((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition()); ((ByteBufferCell) cell).getValuePosition());
} }
return Bytes.toLong(cell.getValueArray(), cell.getValueOffset()); return Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
} }
@ -1732,9 +1792,9 @@ public final class CellUtil {
* @return value as double * @return value as double
*/ */
public static double getValueAsDouble(Cell cell) { public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.toDouble(((ByteBufferedCell) cell).getValueByteBuffer(), return ByteBufferUtils.toDouble(((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition()); ((ByteBufferCell) cell).getValuePosition());
} }
return Bytes.toDouble(cell.getValueArray(), cell.getValueOffset()); return Bytes.toDouble(cell.getValueArray(), cell.getValueOffset());
} }
@ -1746,9 +1806,9 @@ public final class CellUtil {
* @return value as BigDecimal * @return value as BigDecimal
*/ */
public static BigDecimal getValueAsBigDecimal(Cell cell) { public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return ByteBufferUtils.toBigDecimal(((ByteBufferedCell) cell).getValueByteBuffer(), return ByteBufferUtils.toBigDecimal(((ByteBufferCell) cell).getValueByteBuffer(),
((ByteBufferedCell) cell).getValuePosition(), cell.getValueLength()); ((ByteBufferCell) cell).getValuePosition(), cell.getValueLength());
} }
return Bytes.toBigDecimal(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); return Bytes.toBigDecimal(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} }
@ -1760,9 +1820,9 @@ public final class CellUtil {
* @return First possible Cell on passed Cell's row. * @return First possible Cell on passed Cell's row.
*/ */
public static Cell createFirstOnRow(final Cell cell) { public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return new FirstOnRowByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new FirstOnRowByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength()); ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength());
} }
return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); return new FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} }
@ -1796,12 +1856,12 @@ public final class CellUtil {
* @return First possible Cell on passed Cell's row. * @return First possible Cell on passed Cell's row.
*/ */
public static Cell createFirstOnRowCol(final Cell cell) { public static Cell createFirstOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return new FirstOnRowColByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new FirstOnRowColByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength(), ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength(),
HConstants.EMPTY_BYTE_BUFFER, 0, (byte) 0, HConstants.EMPTY_BYTE_BUFFER, 0, (byte) 0,
((ByteBufferedCell) cell).getQualifierByteBuffer(), ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), cell.getQualifierLength()); ((ByteBufferCell) cell).getQualifierPosition(), cell.getQualifierLength());
} }
return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(),
cell.getRowLength(), HConstants.EMPTY_BYTE_ARRAY, 0, (byte)0, cell.getQualifierArray(), cell.getRowLength(), HConstants.EMPTY_BYTE_ARRAY, 0, (byte)0, cell.getQualifierArray(),
@ -1829,11 +1889,11 @@ public final class CellUtil {
* @return Last possible Cell on passed Cell's rk:cf and passed qualifier. * @return Last possible Cell on passed Cell's rk:cf and passed qualifier.
*/ */
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) { public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if(cell instanceof ByteBufferedCell) { if(cell instanceof ByteBufferCell) {
return new FirstOnRowColByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new FirstOnRowColByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength(), ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferedCell) cell).getFamilyByteBuffer(), ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), cell.getFamilyLength(), ((ByteBufferCell) cell).getFamilyPosition(), cell.getFamilyLength(),
ByteBuffer.wrap(qArray), qoffest, qlength); ByteBuffer.wrap(qArray), qoffest, qlength);
} }
return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(),
@ -1850,13 +1910,13 @@ public final class CellUtil {
* @param ts * @param ts
*/ */
public static Cell createFirstOnRowColTS(Cell cell, long ts) { public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if(cell instanceof ByteBufferedCell) { if(cell instanceof ByteBufferCell) {
return new FirstOnRowColTSByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new FirstOnRowColTSByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength(), ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferedCell) cell).getFamilyByteBuffer(), ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), cell.getFamilyLength(), ((ByteBufferCell) cell).getFamilyPosition(), cell.getFamilyLength(),
((ByteBufferedCell) cell).getQualifierByteBuffer(), ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), cell.getQualifierLength(), ((ByteBufferCell) cell).getQualifierPosition(), cell.getQualifierLength(),
ts); ts);
} }
return new FirstOnRowColTSCell(cell.getRowArray(), cell.getRowOffset(), return new FirstOnRowColTSCell(cell.getRowArray(), cell.getRowOffset(),
@ -1871,9 +1931,9 @@ public final class CellUtil {
* @return Last possible Cell on passed Cell's row. * @return Last possible Cell on passed Cell's row.
*/ */
public static Cell createLastOnRow(final Cell cell) { public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return new LastOnRowByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new LastOnRowByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength()); ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength());
} }
return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} }
@ -1891,13 +1951,13 @@ public final class CellUtil {
* @return Last possible Cell on passed Cell's rk:cf:q. * @return Last possible Cell on passed Cell's rk:cf:q.
*/ */
public static Cell createLastOnRowCol(final Cell cell) { public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
return new LastOnRowColByteBufferedCell(((ByteBufferedCell) cell).getRowByteBuffer(), return new LastOnRowColByteBufferCell(((ByteBufferCell) cell).getRowByteBuffer(),
((ByteBufferedCell) cell).getRowPosition(), cell.getRowLength(), ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferedCell) cell).getFamilyByteBuffer(), ((ByteBufferCell) cell).getFamilyByteBuffer(),
((ByteBufferedCell) cell).getFamilyPosition(), cell.getFamilyLength(), ((ByteBufferCell) cell).getFamilyPosition(), cell.getFamilyLength(),
((ByteBufferedCell) cell).getQualifierByteBuffer(), ((ByteBufferCell) cell).getQualifierByteBuffer(),
((ByteBufferedCell) cell).getQualifierPosition(), cell.getQualifierLength()); ((ByteBufferCell) cell).getQualifierPosition(), cell.getQualifierLength());
} }
return new LastOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), return new LastOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
@ -1926,9 +1986,9 @@ public final class CellUtil {
*/ */
public static void compressTags(DataOutputStream out, Cell cell, public static void compressTags(DataOutputStream out, Cell cell,
TagCompressionContext tagCompressionContext) throws IOException { TagCompressionContext tagCompressionContext) throws IOException {
if (cell instanceof ByteBufferedCell) { if (cell instanceof ByteBufferCell) {
tagCompressionContext.compressTags(out, ((ByteBufferedCell) cell).getTagsByteBuffer(), tagCompressionContext.compressTags(out, ((ByteBufferCell) cell).getTagsByteBuffer(),
((ByteBufferedCell) cell).getTagsPosition(), cell.getTagsLength()); ((ByteBufferCell) cell).getTagsPosition(), cell.getTagsLength());
} else { } else {
tagCompressionContext.compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), tagCompressionContext.compressTags(out, cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength()); cell.getTagsLength());
@ -2032,7 +2092,7 @@ public final class CellUtil {
* These cells are used in reseeks/seeks to improve the read performance. * These cells are used in reseeks/seeks to improve the read performance.
* They are not real cells that are returned back to the clients * They are not real cells that are returned back to the clients
*/ */
private static abstract class EmptyByteBufferedCell extends ByteBufferedCell private static abstract class EmptyByteBufferCell extends ByteBufferCell
implements SettableSequenceId { implements SettableSequenceId {
@Override @Override
@ -2210,12 +2270,12 @@ public final class CellUtil {
} }
@InterfaceAudience.Private @InterfaceAudience.Private
private static class FirstOnRowByteBufferedCell extends EmptyByteBufferedCell { private static class FirstOnRowByteBufferCell extends EmptyByteBufferCell {
private final ByteBuffer rowBuff; private final ByteBuffer rowBuff;
private final int roffset; private final int roffset;
private final short rlength; private final short rlength;
public FirstOnRowByteBufferedCell(final ByteBuffer row, int roffset, short rlength) { public FirstOnRowByteBufferCell(final ByteBuffer row, int roffset, short rlength) {
this.rowBuff = row; this.rowBuff = row;
this.roffset = roffset; this.roffset = roffset;
this.rlength = rlength; this.rlength = rlength;
@ -2248,12 +2308,12 @@ public final class CellUtil {
} }
@InterfaceAudience.Private @InterfaceAudience.Private
private static class LastOnRowByteBufferedCell extends EmptyByteBufferedCell { private static class LastOnRowByteBufferCell extends EmptyByteBufferCell {
private final ByteBuffer rowBuff; private final ByteBuffer rowBuff;
private final int roffset; private final int roffset;
private final short rlength; private final short rlength;
public LastOnRowByteBufferedCell(final ByteBuffer row, int roffset, short rlength) { public LastOnRowByteBufferCell(final ByteBuffer row, int roffset, short rlength) {
this.rowBuff = row; this.rowBuff = row;
this.roffset = roffset; this.roffset = roffset;
this.rlength = rlength; this.rlength = rlength;
@ -2286,7 +2346,7 @@ public final class CellUtil {
} }
@InterfaceAudience.Private @InterfaceAudience.Private
private static class FirstOnRowColByteBufferedCell extends FirstOnRowByteBufferedCell { private static class FirstOnRowColByteBufferCell extends FirstOnRowByteBufferCell {
private final ByteBuffer famBuff; private final ByteBuffer famBuff;
private final int famOffset; private final int famOffset;
private final byte famLength; private final byte famLength;
@ -2294,7 +2354,7 @@ public final class CellUtil {
private final int colOffset; private final int colOffset;
private final int colLength; private final int colLength;
public FirstOnRowColByteBufferedCell(final ByteBuffer row, int roffset, short rlength, public FirstOnRowColByteBufferCell(final ByteBuffer row, int roffset, short rlength,
final ByteBuffer famBuff, final int famOffset, final byte famLength, final ByteBuffer col, final ByteBuffer famBuff, final int famOffset, final byte famLength, final ByteBuffer col,
final int colOffset, final int colLength) { final int colOffset, final int colLength) {
super(row, roffset, rlength); super(row, roffset, rlength);
@ -2406,11 +2466,11 @@ public final class CellUtil {
} }
@InterfaceAudience.Private @InterfaceAudience.Private
private static class FirstOnRowColTSByteBufferedCell extends FirstOnRowColByteBufferedCell { private static class FirstOnRowColTSByteBufferCell extends FirstOnRowColByteBufferCell {
private long ts; private long ts;
public FirstOnRowColTSByteBufferedCell(ByteBuffer rBuffer, int roffset, short rlength, public FirstOnRowColTSByteBufferCell(ByteBuffer rBuffer, int roffset, short rlength,
ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, int qlength, ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, int qlength,
long ts) { long ts) {
super(rBuffer, roffset, rlength, fBuffer, foffset, flength, qBuffer, qoffset, qlength); super(rBuffer, roffset, rlength, fBuffer, foffset, flength, qBuffer, qoffset, qlength);
@ -2513,7 +2573,7 @@ public final class CellUtil {
} }
@InterfaceAudience.Private @InterfaceAudience.Private
private static class LastOnRowColByteBufferedCell extends LastOnRowByteBufferedCell { private static class LastOnRowColByteBufferCell extends LastOnRowByteBufferCell {
private final ByteBuffer fBuffer; private final ByteBuffer fBuffer;
private final int foffset; private final int foffset;
private final byte flength; private final byte flength;
@ -2521,7 +2581,7 @@ public final class CellUtil {
private final int qoffset; private final int qoffset;
private final int qlength; private final int qlength;
public LastOnRowColByteBufferedCell(ByteBuffer rBuffer, int roffset, short rlength, public LastOnRowColByteBufferCell(ByteBuffer rBuffer, int roffset, short rlength,
ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset, ByteBuffer fBuffer, int foffset, byte flength, ByteBuffer qBuffer, int qoffset,
int qlength) { int qlength) {
super(rBuffer, roffset, rlength); super(rBuffer, roffset, rlength);
@ -2604,4 +2664,34 @@ public final class CellUtil {
return Type.DeleteFamily.getCode(); return Type.DeleteFamily.getCode();
} }
} }
/**
* Clone the passed cell by copying its data into the passed buf.
*/
public static Cell copyCellTo(Cell cell, ByteBuffer buf, int offset, int len) {
int tagsLen = cell.getTagsLength();
if (cell instanceof ExtendedCell) {
((ExtendedCell) cell).write(buf, offset);
} else {
// Normally all Cell impls within Server will be of type ExtendedCell. Just considering the
// other case also. The data fragments within Cell is copied into buf as in KeyValue
// serialization format only.
KeyValueUtil.appendToByteBuffer(cell, buf, offset, true);
}
if (buf.hasArray()) {
KeyValue newKv;
if (tagsLen == 0) {
// When tagsLen is 0, make a NoTagsKeyValue version of Cell. This is an optimized class
// which directly return tagsLen as 0. So we avoid parsing many length components in
// reading the tagLength stored in the backing buffer. The Memstore addition of every Cell
// call getTagsLength().
newKv = new NoTagsKeyValue(buf.array(), buf.arrayOffset() + offset, len);
} else {
newKv = new KeyValue(buf.array(), buf.arrayOffset() + offset, len);
}
newKv.setSequenceId(cell.getSequenceId());
return newKv;
}
return new OffheapKeyValue(buf, offset, len, tagsLen > 0, cell.getSequenceId());
}
} }

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStream; import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.HeapSize;
@ -65,7 +66,7 @@ public interface ExtendedCell extends Cell, SettableSequenceId, SettableTimestam
* @param buf The buffer where to write the Cell. * @param buf The buffer where to write the Cell.
* @param offset The offset within buffer, to write the Cell. * @param offset The offset within buffer, to write the Cell.
*/ */
void write(byte[] buf, int offset); void write(ByteBuffer buf, int offset);
/** /**
* @return The heap size overhead associated with this Cell. * @return The heap size overhead associated with this Cell.

View File

@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKConfig;
@ -81,7 +80,6 @@ public class HBaseConfiguration extends Configuration {
conf.addResource("hbase-site.xml"); conf.addResource("hbase-site.xml");
checkDefaultsVersion(conf); checkDefaultsVersion(conf);
HeapMemorySizeUtil.checkForClusterFreeMemoryLimit(conf);
return conf; return conf;
} }

View File

@ -2491,8 +2491,8 @@ public class KeyValue implements ExtendedCell {
} }
@Override @Override
public void write(byte[] buf, int offset) { public void write(ByteBuffer buf, int offset) {
System.arraycopy(this.bytes, this.offset, buf, offset, this.length); ByteBufferUtils.copyFromArrayToBuffer(buf, offset, this.bytes, this.offset, this.length);
} }
/** /**

View File

@ -153,7 +153,6 @@ public class KeyValueUtil {
return nextOffset; return nextOffset;
} }
/**************** copy key and value *********************/ /**************** copy key and value *********************/
public static int appendToByteArray(Cell cell, byte[] output, int offset, boolean withTags) { public static int appendToByteArray(Cell cell, byte[] output, int offset, boolean withTags) {
@ -170,15 +169,25 @@ public class KeyValueUtil {
} }
/** /**
* The position will be set to the beginning of the new ByteBuffer * Copy the Cell content into the passed buf in KeyValue serialization format.
* @param cell
* @return the ByteBuffer containing the cell
*/ */
public static ByteBuffer copyToNewByteBuffer(final Cell cell) { public static int appendToByteBuffer(Cell cell, ByteBuffer buf, int offset, boolean withTags) {
byte[] bytes = new byte[length(cell)]; offset = ByteBufferUtils.putInt(buf, offset, keyLength(cell));// Key length
appendToByteArray(cell, bytes, 0, true); offset = ByteBufferUtils.putInt(buf, offset, cell.getValueLength());// Value length
ByteBuffer buffer = ByteBuffer.wrap(bytes); offset = ByteBufferUtils.putShort(buf, offset, cell.getRowLength());// RK length
return buffer; offset = CellUtil.copyRowTo(cell, buf, offset);// Row bytes
offset = ByteBufferUtils.putByte(buf, offset, cell.getFamilyLength());// CF length
offset = CellUtil.copyFamilyTo(cell, buf, offset);// CF bytes
offset = CellUtil.copyQualifierTo(cell, buf, offset);// Qualifier bytes
offset = ByteBufferUtils.putLong(buf, offset, cell.getTimestamp());// TS
offset = ByteBufferUtils.putByte(buf, offset, cell.getTypeByte());// Type
offset = CellUtil.copyValueTo(cell, buf, offset);// Value bytes
int tagsLength = cell.getTagsLength();
if (withTags && (tagsLength > 0)) {
offset = ByteBufferUtils.putAsShort(buf, offset, tagsLength);// Tags length
offset = CellUtil.copyTagTo(cell, buf, offset);// Tags bytes
}
return offset;
} }
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv, public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
@ -660,29 +669,4 @@ public class KeyValueUtil {
return size; return size;
} }
} }
/**
* Write the given cell in KeyValue serialization format into the given buf and return a new
* KeyValue object around that.
*/
public static KeyValue copyCellTo(Cell cell, byte[] buf, int offset, int len) {
int tagsLen = cell.getTagsLength();
if (cell instanceof ExtendedCell) {
((ExtendedCell) cell).write(buf, offset);
} else {
appendToByteArray(cell, buf, offset, true);
}
KeyValue newKv;
if (tagsLen == 0) {
// When tagsLen is 0, make a NoTagsKeyValue version of Cell. This is an optimized class which
// directly return tagsLen as 0. So we avoid parsing many length components in reading the
// tagLength stored in the backing buffer. The Memstore addition of every Cell call
// getTagsLength().
newKv = new NoTagsKeyValue(buf, offset, len);
} else {
newKv = new KeyValue(buf, offset, len);
}
newKv.setSequenceId(cell.getSequenceId());
return newKv;
}
} }

View File

@ -27,11 +27,11 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ClassSize;
/** /**
* This Cell is an implementation of {@link ByteBufferedCell} where the data resides in off heap * This Cell is an implementation of {@link ByteBufferCell} where the data resides in off heap
* memory. * memory.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class OffheapKeyValue extends ByteBufferedCell implements ExtendedCell { public class OffheapKeyValue extends ByteBufferCell implements ExtendedCell {
protected final ByteBuffer buf; protected final ByteBuffer buf;
protected final int offset; protected final int offset;
@ -266,8 +266,8 @@ public class OffheapKeyValue extends ByteBufferedCell implements ExtendedCell {
} }
@Override @Override
public void write(byte[] buf, int offset) { public void write(ByteBuffer buf, int offset) {
ByteBufferUtils.copyFromBufferToArray(buf, this.buf, this.offset, offset, this.length); ByteBufferUtils.copyFromBufferToBuffer(this.buf, buf, this.offset, offset, this.length);
} }
@Override @Override

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.io.encoding;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
@ -55,7 +55,7 @@ public abstract class AbstractDataBlockEncoder implements DataBlockEncoder {
return new KeyValue.KeyOnlyKeyValue(key.array(), key.arrayOffset() return new KeyValue.KeyOnlyKeyValue(key.array(), key.arrayOffset()
+ key.position(), keyLength); + key.position(), keyLength);
} else { } else {
return new ByteBufferedKeyOnlyKeyValue(key, key.position(), keyLength); return new ByteBufferKeyOnlyKeyValue(key, key.position(), keyLength);
} }
} }

View File

@ -22,7 +22,7 @@ import java.io.IOException;
import java.io.OutputStream; import java.io.OutputStream;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -452,7 +452,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
} }
@Override @Override
public void write(byte[] buf, int offset) { public void write(ByteBuffer buf, int offset) {
// This is not used in actual flow. Throwing UnsupportedOperationException // This is not used in actual flow. Throwing UnsupportedOperationException
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@ -475,7 +475,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
} }
} }
protected static class OffheapDecodedCell extends ByteBufferedCell implements ExtendedCell { protected static class OffheapDecodedCell extends ByteBufferCell implements ExtendedCell {
private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT
+ (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (7 * Bytes.SIZEOF_INT) + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (7 * Bytes.SIZEOF_INT)
+ (Bytes.SIZEOF_SHORT) + (2 * Bytes.SIZEOF_BYTE) + (3 * ClassSize.BYTE_BUFFER)); + (Bytes.SIZEOF_SHORT) + (2 * Bytes.SIZEOF_BYTE) + (3 * ClassSize.BYTE_BUFFER));
@ -708,7 +708,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
} }
@Override @Override
public void write(byte[] buf, int offset) { public void write(ByteBuffer buf, int offset) {
// This is not used in actual flow. Throwing UnsupportedOperationException // This is not used in actual flow. Throwing UnsupportedOperationException
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }

View File

@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.io.encoding;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -153,10 +153,10 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
} }
private int compareRows(ByteBuffer row, Cell seekCell) { private int compareRows(ByteBuffer row, Cell seekCell) {
if (seekCell instanceof ByteBufferedCell) { if (seekCell instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(row, row.position(), row.remaining(), return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
((ByteBufferedCell) seekCell).getRowByteBuffer(), ((ByteBufferCell) seekCell).getRowByteBuffer(),
((ByteBufferedCell) seekCell).getRowPosition(), ((ByteBufferCell) seekCell).getRowPosition(),
seekCell.getRowLength()); seekCell.getRowLength());
} else { } else {
return ByteBufferUtils.compareTo(row, row.position(), row.remaining(), return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
@ -315,7 +315,7 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
protected long memstoreTS; protected long memstoreTS;
protected int nextKvOffset; protected int nextKvOffset;
// buffer backed keyonlyKV // buffer backed keyonlyKV
private ByteBufferedKeyOnlyKeyValue currentKey = new ByteBufferedKeyOnlyKeyValue(); private ByteBufferKeyOnlyKeyValue currentKey = new ByteBufferKeyOnlyKeyValue();
protected boolean isValid() { protected boolean isValid() {
return valueOffset != -1; return valueOffset != -1;
@ -323,7 +323,7 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
protected void invalidate() { protected void invalidate() {
valueOffset = -1; valueOffset = -1;
currentKey = new ByteBufferedKeyOnlyKeyValue(); currentKey = new ByteBufferKeyOnlyKeyValue();
currentBuffer = null; currentBuffer = null;
} }

View File

@ -876,6 +876,14 @@ public final class ByteBufferUtils {
} }
} }
public static int putInt(ByteBuffer buffer, int index, int val) {
if (UNSAFE_UNALIGNED) {
return UnsafeAccess.putInt(buffer, index, val);
}
buffer.putInt(index, val);
return index + Bytes.SIZEOF_INT;
}
/** /**
* Reads a double value at the given buffer's offset. * Reads a double value at the given buffer's offset.
* @param buffer * @param buffer
@ -919,6 +927,21 @@ public final class ByteBufferUtils {
} }
} }
public static int putShort(ByteBuffer buffer, int index, short val) {
if (UNSAFE_UNALIGNED) {
return UnsafeAccess.putShort(buffer, index, val);
}
buffer.putShort(index, val);
return index + Bytes.SIZEOF_SHORT;
}
public static int putAsShort(ByteBuffer buf, int index, int val) {
buf.put(index + 1, (byte) val);
val >>= 8;
buf.put(index, (byte) val);
return index + Bytes.SIZEOF_SHORT;
}
/** /**
* Put a long value out to the given ByteBuffer's current position in big-endian format. * Put a long value out to the given ByteBuffer's current position in big-endian format.
* This also advances the position in buffer by long size. * This also advances the position in buffer by long size.
@ -933,6 +956,15 @@ public final class ByteBufferUtils {
buffer.putLong(val); buffer.putLong(val);
} }
} }
public static int putLong(ByteBuffer buffer, int index, long val) {
if (UNSAFE_UNALIGNED) {
return UnsafeAccess.putLong(buffer, index, val);
}
buffer.putLong(index, val);
return index + Bytes.SIZEOF_LONG;
}
/** /**
* Copies the bytes from given array's offset to length part into the given buffer. Puts the bytes * Copies the bytes from given array's offset to length part into the given buffer. Puts the bytes
* to buffer's current position. This also advances the position in the 'out' buffer by 'length' * to buffer's current position. This also advances the position in the 'out' buffer by 'length'

View File

@ -23,7 +23,7 @@ import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TestCellUtil.ByteBufferedCellImpl; import org.apache.hadoop.hbase.TestCellUtil.ByteBufferCellImpl;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -102,15 +102,15 @@ public class TestCellComparator {
byte[] v = Bytes.toBytes("val1"); byte[] v = Bytes.toBytes("val1");
KeyValue kv = new KeyValue(r1, f1, q1, v); KeyValue kv = new KeyValue(r1, f1, q1, v);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell1 = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell1 = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
kv = new KeyValue(r2, f1, q1, v); kv = new KeyValue(r2, f1, q1, v);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell2 = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell2 = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareColumns(bbCell1, bbCell2)); assertEquals(0, CellComparator.compareColumns(bbCell1, bbCell2));
assertEquals(0, CellComparator.compareColumns(bbCell1, kv)); assertEquals(0, CellComparator.compareColumns(bbCell1, kv));
kv = new KeyValue(r2, f1, q2, v); kv = new KeyValue(r2, f1, q2, v);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell3 = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell3 = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareFamilies(bbCell2, bbCell3)); assertEquals(0, CellComparator.compareFamilies(bbCell2, bbCell3));
assertTrue(CellComparator.compareQualifiers(bbCell2, bbCell3) < 0); assertTrue(CellComparator.compareQualifiers(bbCell2, bbCell3) < 0);
assertTrue(CellComparator.compareColumns(bbCell2, bbCell3) < 0); assertTrue(CellComparator.compareColumns(bbCell2, bbCell3) < 0);

View File

@ -416,7 +416,7 @@ public class TestCellUtil {
byte[] tags = Bytes.toBytes("tag1"); byte[] tags = Bytes.toBytes("tag1");
KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags); KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
byte[] rDest = CellUtil.cloneRow(bbCell); byte[] rDest = CellUtil.cloneRow(bbCell);
assertTrue(Bytes.equals(r, rDest)); assertTrue(Bytes.equals(r, rDest));
byte[] fDest = CellUtil.cloneFamily(bbCell); byte[] fDest = CellUtil.cloneFamily(bbCell);
@ -440,10 +440,10 @@ public class TestCellUtil {
byte[] tags = Bytes.toBytes("tag1"); byte[] tags = Bytes.toBytes("tag1");
KeyValue kv = new KeyValue(r, f, q1, 0, q1.length, 1234L, Type.Put, v, 0, v.length, tags); KeyValue kv = new KeyValue(r, f, q1, 0, q1.length, 1234L, Type.Put, v, 0, v.length, tags);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell1 = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell1 = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
kv = new KeyValue(r, f, q2, 0, q2.length, 1234L, Type.Put, v, 0, v.length, tags); kv = new KeyValue(r, f, q2, 0, q2.length, 1234L, Type.Put, v, 0, v.length, tags);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell2 = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell2 = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertTrue(CellUtil.matchingRows(bbCell1, bbCell2)); assertTrue(CellUtil.matchingRows(bbCell1, bbCell2));
assertTrue(CellUtil.matchingRows(kv, bbCell2)); assertTrue(CellUtil.matchingRows(kv, bbCell2));
assertTrue(CellUtil.matchingRow(bbCell1, r)); assertTrue(CellUtil.matchingRow(bbCell1, r));
@ -473,30 +473,30 @@ public class TestCellUtil {
byte[] v = Bytes.toBytes(vl); byte[] v = Bytes.toBytes(vl);
KeyValue kv = new KeyValue(r, f, q, v); KeyValue kv = new KeyValue(r, f, q, v);
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); Cell bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(ri, CellUtil.getRowAsInt(bbCell)); assertEquals(ri, CellUtil.getRowAsInt(bbCell));
assertEquals(vl, CellUtil.getValueAsLong(bbCell)); assertEquals(vl, CellUtil.getValueAsLong(bbCell));
double vd = 3005.5; double vd = 3005.5;
v = Bytes.toBytes(vd); v = Bytes.toBytes(vd);
kv = new KeyValue(r, f, q, v); kv = new KeyValue(r, f, q, v);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(vd, CellUtil.getValueAsDouble(bbCell), 0.0); assertEquals(vd, CellUtil.getValueAsDouble(bbCell), 0.0);
BigDecimal bd = new BigDecimal(9999); BigDecimal bd = new BigDecimal(9999);
v = Bytes.toBytes(bd); v = Bytes.toBytes(bd);
kv = new KeyValue(r, f, q, v); kv = new KeyValue(r, f, q, v);
buffer = ByteBuffer.wrap(kv.getBuffer()); buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferedCellImpl(buffer, 0, buffer.remaining()); bbCell = new ByteBufferCellImpl(buffer, 0, buffer.remaining());
assertEquals(bd, CellUtil.getValueAsBigDecimal(bbCell)); assertEquals(bd, CellUtil.getValueAsBigDecimal(bbCell));
} }
// TODO remove this test impl once we have a Cell implementation backed by ByteBuffer // TODO remove this test impl once we have a Cell implementation backed by ByteBuffer
public static class ByteBufferedCellImpl extends ByteBufferedCell { public static class ByteBufferCellImpl extends ByteBufferCell {
private final ByteBuffer buffer; private final ByteBuffer buffer;
private final int offset, length; private final int offset, length;
public ByteBufferedCellImpl(ByteBuffer buffer, int offset, int length) { public ByteBufferCellImpl(ByteBuffer buffer, int offset, int length) {
this.buffer = buffer; this.buffer = buffer;
this.offset = offset; this.offset = offset;
this.length = length; this.length = length;

View File

@ -56,7 +56,7 @@ public class TestOffheapKeyValue {
KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0l, Type.Put, row1); KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0l, Type.Put, row1);
ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length); ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length); ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
ByteBufferedCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), false, 0l); ByteBufferCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), false, 0l);
assertEquals( assertEquals(
ROW1, ROW1,
ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(), ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(),
@ -138,7 +138,7 @@ public class TestOffheapKeyValue {
KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0l, Type.Put, row1, tags); KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0l, Type.Put, row1, tags);
ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length); ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length); ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
ByteBufferedCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), true, 0l); ByteBufferCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), true, 0l);
assertEquals( assertEquals(
ROW1, ROW1,
ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(), ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(),
@ -178,7 +178,7 @@ public class TestOffheapKeyValue {
ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getKeyLength()); ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getKeyLength());
ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), kvCell.getKeyOffset(), ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), kvCell.getKeyOffset(),
kvCell.getKeyLength()); kvCell.getKeyLength());
ByteBufferedCell offheapKeyOnlyKV = new ByteBufferedKeyOnlyKeyValue(buf, 0, buf.capacity()); ByteBufferCell offheapKeyOnlyKV = new ByteBufferKeyOnlyKeyValue(buf, 0, buf.capacity());
assertEquals( assertEquals(
ROW1, ROW1,
ByteBufferUtils.toStringBinary(offheapKeyOnlyKV.getRowByteBuffer(), ByteBufferUtils.toStringBinary(offheapKeyOnlyKV.getRowByteBuffer(),

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.OffheapKeyValue; import org.apache.hadoop.hbase.OffheapKeyValue;
import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.util.LRUDictionary; import org.apache.hadoop.hbase.io.util.LRUDictionary;
import org.apache.hadoop.hbase.nio.SingleByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff;
@ -80,10 +80,10 @@ public class TestTagCompressionContext {
ByteArrayOutputStream baos = new ByteArrayOutputStream(); ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos); DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos);
TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE); TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
ByteBufferedCell kv1 = (ByteBufferedCell)createOffheapKVWithTags(2); ByteBufferCell kv1 = (ByteBufferCell)createOffheapKVWithTags(2);
int tagsLength1 = kv1.getTagsLength(); int tagsLength1 = kv1.getTagsLength();
context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1); context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1);
ByteBufferedCell kv2 = (ByteBufferedCell)createOffheapKVWithTags(3); ByteBufferCell kv2 = (ByteBufferCell)createOffheapKVWithTags(3);
int tagsLength2 = kv2.getTagsLength(); int tagsLength2 = kv2.getTagsLength();
context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2); context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2);
@ -129,10 +129,10 @@ public class TestTagCompressionContext {
ByteArrayOutputStream baos = new ByteArrayOutputStream(); ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos); DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos);
TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE); TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
ByteBufferedCell kv1 = (ByteBufferedCell)createOffheapKVWithTags(1); ByteBufferCell kv1 = (ByteBufferCell)createOffheapKVWithTags(1);
int tagsLength1 = kv1.getTagsLength(); int tagsLength1 = kv1.getTagsLength();
context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1); context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1);
ByteBufferedCell kv2 = (ByteBufferedCell)createOffheapKVWithTags(3); ByteBufferCell kv2 = (ByteBufferCell)createOffheapKVWithTags(3);
int tagsLength2 = kv2.getTagsLength(); int tagsLength2 = kv2.getTagsLength();
context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2); context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2);

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.codec.prefixtree;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -93,7 +93,7 @@ public class PrefixTreeSeeker implements EncodedSeeker {
// The PrefixTreecell is of type BytebufferedCell and the value part of the cell // The PrefixTreecell is of type BytebufferedCell and the value part of the cell
// determines whether we are offheap cell or onheap cell. All other parts of the cell- // determines whether we are offheap cell or onheap cell. All other parts of the cell-
// row, fam and col are all represented as onheap byte[] // row, fam and col are all represented as onheap byte[]
ByteBufferedCell cell = (ByteBufferedCell)ptSearcher.current(); ByteBufferCell cell = (ByteBufferCell)ptSearcher.current();
if (cell == null) { if (cell == null) {
return null; return null;
} }
@ -373,7 +373,7 @@ public class PrefixTreeSeeker implements EncodedSeeker {
} }
} }
private static class OffheapPrefixTreeCell extends ByteBufferedCell implements Cell, private static class OffheapPrefixTreeCell extends ByteBufferCell implements Cell,
SettableSequenceId, HeapSize { SettableSequenceId, HeapSize {
private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT
+ (5 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (4 * Bytes.SIZEOF_INT) + (5 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (4 * Bytes.SIZEOF_INT)

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.codec.prefixtree.decode;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.util.ObjectIntPair;
* without allocating new memory for every Cell iterated through. * without allocating new memory for every Cell iterated through.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class PrefixTreeCell extends ByteBufferedCell implements SettableSequenceId, public class PrefixTreeCell extends ByteBufferCell implements SettableSequenceId,
Comparable<Cell> { Comparable<Cell> {
// Create a reference here? Can be removed too // Create a reference here? Can be removed too
protected CellComparator comparator = CellComparator.COMPARATOR; protected CellComparator comparator = CellComparator.COMPARATOR;

View File

@ -34,7 +34,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -745,7 +745,7 @@ public class HFileBlockIndex {
// If we imagine that keys[-1] = -Infinity and // If we imagine that keys[-1] = -Infinity and
// keys[numEntries] = Infinity, then we are maintaining an invariant that // keys[numEntries] = Infinity, then we are maintaining an invariant that
// keys[low - 1] < key < keys[high + 1] while narrowing down the range. // keys[low - 1] < key < keys[high + 1] while narrowing down the range.
ByteBufferedKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferedKeyOnlyKeyValue(); ByteBufferKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferKeyOnlyKeyValue();
ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>(); ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>();
while (low <= high) { while (low <= high) {
mid = (low + high) >>> 1; mid = (low + high) >>> 1;

View File

@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -491,7 +491,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
protected final HFile.Reader reader; protected final HFile.Reader reader;
private int currTagsLen; private int currTagsLen;
// buffer backed keyonlyKV // buffer backed keyonlyKV
private ByteBufferedKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferedKeyOnlyKeyValue(); private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue();
// A pair for reusing in blockSeek() so that we don't garbage lot of objects // A pair for reusing in blockSeek() so that we don't garbage lot of objects
final ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>(); final ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<ByteBuffer>();
@ -1208,7 +1208,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), keyBuff.arrayOffset() return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), keyBuff.arrayOffset()
+ keyBuff.position(), klen); + keyBuff.position(), klen);
} else { } else {
return new ByteBufferedKeyOnlyKeyValue(keyBuff, keyBuff.position(), klen); return new ByteBufferKeyOnlyKeyValue(keyBuff, keyBuff.position(), klen);
} }
} }

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.io.util; package org.apache.hadoop.hbase.io.util;
import java.lang.management.ManagementFactory; import java.lang.management.ManagementFactory;
import java.lang.management.MemoryType;
import java.lang.management.MemoryUsage; import java.lang.management.MemoryUsage;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -25,9 +26,14 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.Pair;
/**
* Util class to calculate memory size for memstore, block cache(L1, L2) of RS.
*/
@InterfaceAudience.Private @InterfaceAudience.Private
public class HeapMemorySizeUtil { public class MemorySizeUtil {
public static final String MEMSTORE_SIZE_KEY = "hbase.regionserver.global.memstore.size"; public static final String MEMSTORE_SIZE_KEY = "hbase.regionserver.global.memstore.size";
public static final String MEMSTORE_SIZE_OLD_KEY = public static final String MEMSTORE_SIZE_OLD_KEY =
@ -36,12 +42,16 @@ public class HeapMemorySizeUtil {
"hbase.regionserver.global.memstore.size.lower.limit"; "hbase.regionserver.global.memstore.size.lower.limit";
public static final String MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY = public static final String MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY =
"hbase.regionserver.global.memstore.lowerLimit"; "hbase.regionserver.global.memstore.lowerLimit";
// Max global off heap memory that can be used for all memstores
// This should be an absolute value in MBs and not percent.
public static final String OFFHEAP_MEMSTORE_SIZE_KEY =
"hbase.regionserver.offheap.global.memstore.size";
public static final float DEFAULT_MEMSTORE_SIZE = 0.4f; public static final float DEFAULT_MEMSTORE_SIZE = 0.4f;
// Default lower water mark limit is 95% size of memstore size. // Default lower water mark limit is 95% size of memstore size.
public static final float DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT = 0.95f; public static final float DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT = 0.95f;
private static final Log LOG = LogFactory.getLog(HeapMemorySizeUtil.class); private static final Log LOG = LogFactory.getLog(MemorySizeUtil.class);
// a constant to convert a fraction to a percentage // a constant to convert a fraction to a percentage
private static final int CONVERT_TO_PERCENTAGE = 100; private static final int CONVERT_TO_PERCENTAGE = 100;
@ -50,11 +60,11 @@ public class HeapMemorySizeUtil {
* We need atleast 20% of heap left out for other RS functions. * We need atleast 20% of heap left out for other RS functions.
* @param conf * @param conf
*/ */
public static void checkForClusterFreeMemoryLimit(Configuration conf) { public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) {
if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) { if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) {
LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY); LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY);
} }
float globalMemstoreSize = getGlobalMemStorePercent(conf, false); float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false);
int gml = (int)(globalMemstoreSize * CONVERT_TO_PERCENTAGE); int gml = (int)(globalMemstoreSize * CONVERT_TO_PERCENTAGE);
float blockCacheUpperLimit = getBlockCacheHeapPercent(conf); float blockCacheUpperLimit = getBlockCacheHeapPercent(conf);
int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE);
@ -76,7 +86,8 @@ public class HeapMemorySizeUtil {
* @param c * @param c
* @param logInvalid * @param logInvalid
*/ */
public static float getGlobalMemStorePercent(final Configuration c, final boolean logInvalid) { public static float getGlobalMemStoreHeapPercent(final Configuration c,
final boolean logInvalid) {
float limit = c.getFloat(MEMSTORE_SIZE_KEY, float limit = c.getFloat(MEMSTORE_SIZE_KEY,
c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE));
if (limit > 0.8f || limit <= 0.0f) { if (limit > 0.8f || limit <= 0.0f) {
@ -93,34 +104,64 @@ public class HeapMemorySizeUtil {
* Retrieve configured size for global memstore lower water mark as fraction of global memstore * Retrieve configured size for global memstore lower water mark as fraction of global memstore
* size. * size.
*/ */
public static float getGlobalMemStoreLowerMark(final Configuration conf, float globalMemStorePercent) { public static float getGlobalMemStoreHeapLowerMark(final Configuration conf,
boolean honorOldConfig) {
String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY); String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY);
if (lowMarkPercentStr != null) { if (lowMarkPercentStr != null) {
float lowMarkPercent = Float.parseFloat(lowMarkPercentStr); float lowMarkPercent = Float.parseFloat(lowMarkPercentStr);
if (lowMarkPercent > 1.0f) { if (lowMarkPercent > 1.0f) {
LOG.error("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY + ": " + LOG.error("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY + ": "
lowMarkPercent + ". Using 1.0f instead."); + lowMarkPercent + ". Using 1.0f instead.");
lowMarkPercent = 1.0f; lowMarkPercent = 1.0f;
} }
return lowMarkPercent; return lowMarkPercent;
} }
if (!honorOldConfig) return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY); String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY);
if (lowerWaterMarkOldValStr != null) { if (lowerWaterMarkOldValStr != null) {
LOG.warn(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use " LOG.warn(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use "
+ MEMSTORE_SIZE_LOWER_LIMIT_KEY); + MEMSTORE_SIZE_LOWER_LIMIT_KEY);
float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr); float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr);
if (lowerWaterMarkOldVal > globalMemStorePercent) { float upperMarkPercent = getGlobalMemStoreHeapPercent(conf, false);
lowerWaterMarkOldVal = globalMemStorePercent; if (lowerWaterMarkOldVal > upperMarkPercent) {
lowerWaterMarkOldVal = upperMarkPercent;
LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal
+ ") is greater than global memstore limit (" + globalMemStorePercent + ") set by " + ") is greater than global memstore limit (" + upperMarkPercent + ") set by "
+ MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit " + MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit "
+ "to " + globalMemStorePercent); + "to " + upperMarkPercent);
} }
return lowerWaterMarkOldVal / globalMemStorePercent; return lowerWaterMarkOldVal / upperMarkPercent;
} }
return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT; return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
} }
/**
* @return Pair of global memstore size and memory type(ie. on heap or off heap).
*/
public static Pair<Long, MemoryType> getGlobalMemstoreSize(Configuration conf) {
long offheapMSGlobal = conf.getLong(OFFHEAP_MEMSTORE_SIZE_KEY, 0);// Size in MBs
if (offheapMSGlobal > 0) {
// Off heap memstore size has not relevance when MSLAB is turned OFF. We will go with making
// this entire size split into Chunks and pooling them in MemstoreLABPoool. We dont want to
// create so many on demand off heap chunks. In fact when this off heap size is configured, we
// will go with 100% of this size as the pool size
if (MemStoreLAB.isEnabled(conf)) {
// We are in offheap Memstore use
long globalMemStoreLimit = (long) (offheapMSGlobal * 1024 * 1024); // Size in bytes
return new Pair<Long, MemoryType>(globalMemStoreLimit, MemoryType.NON_HEAP);
} else {
// Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a
// warn log and go with on heap memstore percentage. By default it will be 40% of Xmx
LOG.warn("There is no relevance of configuring '" + OFFHEAP_MEMSTORE_SIZE_KEY + "' when '"
+ MemStoreLAB.USEMSLAB_KEY + "' is turned off."
+ " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')");
}
}
long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
float globalMemStorePercent = getGlobalMemStoreHeapPercent(conf, true);
return new Pair<Long, MemoryType>((long) (max * globalMemStorePercent), MemoryType.HEAP);
}
/** /**
* Retrieve configured size for on heap block cache as percentage of total heap. * Retrieve configured size for on heap block cache as percentage of total heap.
* @param conf * @param conf

View File

@ -17,34 +17,34 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/** /**
* A chunk of memory out of which allocations are sliced. * A chunk of memory out of which allocations are sliced.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class Chunk { public abstract class Chunk {
/** Actual underlying data */ /** Actual underlying data */
private byte[] data; protected ByteBuffer data;
private static final int UNINITIALIZED = -1; protected static final int UNINITIALIZED = -1;
private static final int OOM = -2; protected static final int OOM = -2;
/** /**
* Offset for the next allocation, or the sentinel value -1 which implies that the chunk is still * Offset for the next allocation, or the sentinel value -1 which implies that the chunk is still
* uninitialized. * uninitialized.
*/ */
private AtomicInteger nextFreeOffset = new AtomicInteger(UNINITIALIZED); protected AtomicInteger nextFreeOffset = new AtomicInteger(UNINITIALIZED);
/** Total number of allocations satisfied from this buffer */ /** Total number of allocations satisfied from this buffer */
private AtomicInteger allocCount = new AtomicInteger(); protected AtomicInteger allocCount = new AtomicInteger();
/** Size of chunk in bytes */ /** Size of chunk in bytes */
private final int size; protected final int size;
/** /**
* Create an uninitialized chunk. Note that memory is not allocated yet, so this is cheap. * Create an uninitialized chunk. Note that memory is not allocated yet, so this is cheap.
@ -60,23 +60,7 @@ public class Chunk {
* constructed the chunk. It is thread-safe against other threads calling alloc(), who will block * constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
* until the allocation is complete. * until the allocation is complete.
*/ */
public void init() { public abstract void init();
assert nextFreeOffset.get() == UNINITIALIZED;
try {
if (data == null) {
data = new byte[size];
}
} catch (OutOfMemoryError e) {
boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
assert failInit; // should be true.
throw e;
}
// Mark that it's ready for use
boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, 0);
// We should always succeed the above CAS since only one thread
// calls init()!
Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
}
/** /**
* Reset the offset to UNINITIALIZED before before reusing an old chunk * Reset the offset to UNINITIALIZED before before reusing an old chunk
@ -109,7 +93,7 @@ public class Chunk {
return -1; return -1;
} }
if (oldOffset + size > data.length) { if (oldOffset + size > data.capacity()) {
return -1; // alloc doesn't fit return -1; // alloc doesn't fit
} }
@ -126,14 +110,14 @@ public class Chunk {
/** /**
* @return This chunk's backing data. * @return This chunk's backing data.
*/ */
byte[] getData() { ByteBuffer getData() {
return this.data; return this.data;
} }
@Override @Override
public String toString() { public String toString() {
return "Chunk@" + System.identityHashCode(this) + " allocs=" + allocCount.get() + "waste=" return "Chunk@" + System.identityHashCode(this) + " allocs=" + allocCount.get() + "waste="
+ (data.length - nextFreeOffset.get()); + (data.capacity() - nextFreeOffset.get());
} }
@VisibleForTesting @VisibleForTesting

View File

@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
import org.apache.hadoop.hbase.util.RollingStatCalculator; import org.apache.hadoop.hbase.util.RollingStatCalculator;
@ -109,6 +109,9 @@ class DefaultHeapMemoryTuner implements HeapMemoryTuner {
private float globalMemStorePercentMaxRange; private float globalMemStorePercentMaxRange;
private float blockCachePercentMinRange; private float blockCachePercentMinRange;
private float blockCachePercentMaxRange; private float blockCachePercentMaxRange;
private float globalMemStoreLimitLowMarkPercent;
// Store statistics about the corresponding parameters for memory tuning // Store statistics about the corresponding parameters for memory tuning
private RollingStatCalculator rollingStatsForCacheMisses; private RollingStatCalculator rollingStatsForCacheMisses;
private RollingStatCalculator rollingStatsForFlushes; private RollingStatCalculator rollingStatsForFlushes;
@ -165,11 +168,9 @@ class DefaultHeapMemoryTuner implements HeapMemoryTuner {
newTuneDirection = StepDirection.NEUTRAL; newTuneDirection = StepDirection.NEUTRAL;
} }
// Increase / decrease the memstore / block cahce sizes depending on new tuner step. // Increase / decrease the memstore / block cahce sizes depending on new tuner step.
float globalMemstoreLowerMark = HeapMemorySizeUtil.getGlobalMemStoreLowerMark(conf,
curMemstoreSize);
// We don't want to exert immediate pressure on memstore. So, we decrease its size gracefully; // We don't want to exert immediate pressure on memstore. So, we decrease its size gracefully;
// we set a minimum bar in the middle of the total memstore size and the lower limit. // we set a minimum bar in the middle of the total memstore size and the lower limit.
float minMemstoreSize = ((globalMemstoreLowerMark + 1) * curMemstoreSize) / 2.00f; float minMemstoreSize = ((globalMemStoreLimitLowMarkPercent + 1) * curMemstoreSize) / 2.00f;
switch (newTuneDirection) { switch (newTuneDirection) {
case INCREASE_BLOCK_CACHE_SIZE: case INCREASE_BLOCK_CACHE_SIZE:
@ -365,9 +366,11 @@ class DefaultHeapMemoryTuner implements HeapMemoryTuner {
this.blockCachePercentMaxRange = conf.getFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY, this.blockCachePercentMaxRange = conf.getFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY,
conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT)); conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
this.globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY, this.globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY,
HeapMemorySizeUtil.getGlobalMemStorePercent(conf, false)); MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
this.globalMemStorePercentMaxRange = conf.getFloat(MEMSTORE_SIZE_MAX_RANGE_KEY, this.globalMemStorePercentMaxRange = conf.getFloat(MEMSTORE_SIZE_MAX_RANGE_KEY,
HeapMemorySizeUtil.getGlobalMemStorePercent(conf, false)); MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
this.globalMemStoreLimitLowMarkPercent = MemorySizeUtil.getGlobalMemStoreHeapLowerMark(conf,
true);
// Default value of periods to ignore is number of lookup periods // Default value of periods to ignore is number of lookup periods
this.numPeriodsToIgnore = conf.getInt(NUM_PERIODS_TO_IGNORE, this.tunerLookupPeriods); this.numPeriodsToIgnore = conf.getInt(NUM_PERIODS_TO_IGNORE, this.tunerLookupPeriods);
this.rollingStatsForCacheMisses = new RollingStatCalculator(this.tunerLookupPeriods); this.rollingStatsForCacheMisses = new RollingStatCalculator(this.tunerLookupPeriods);

View File

@ -7028,7 +7028,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder(); ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder();
stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreDataSize.get() * 100) / this stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreDataSize.get() * 100) / this
.memstoreFlushSize))); .memstoreFlushSize)));
stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100); if (rsServices.getHeapMemoryManager() != null) {
stats.setHeapOccupancy(
(int) rsServices.getHeapMemoryManager().getHeapOccupancyPercent() * 100);
}
stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 : stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 :
(int)rsServices.getCompactionPressure()*100); (int)rsServices.getCompactionPressure()*100);
return stats.build(); return stats.build();

View File

@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.InterruptedIOException; import java.io.InterruptedIOException;
import java.lang.Thread.UncaughtExceptionHandler; import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.management.ManagementFactory; import java.lang.management.ManagementFactory;
import java.lang.management.MemoryType;
import java.lang.management.MemoryUsage; import java.lang.management.MemoryUsage;
import java.lang.reflect.Constructor; import java.lang.reflect.Constructor;
import java.net.BindException; import java.net.BindException;
@ -100,6 +101,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcClientFactory;
@ -170,6 +172,7 @@ import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.JSONBean; import org.apache.hadoop.hbase.util.JSONBean;
import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.hbase.util.JvmPauseMonitor;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Sleeper; import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
@ -516,6 +519,7 @@ public class HRegionServer extends HasThread implements
super("RegionServer"); // thread name super("RegionServer"); // thread name
this.fsOk = true; this.fsOk = true;
this.conf = conf; this.conf = conf;
MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(this.conf);
HFile.checkHFileVersion(this.conf); HFile.checkHFileVersion(this.conf);
checkCodecs(this.conf); checkCodecs(this.conf);
this.userProvider = UserProvider.instantiate(conf); this.userProvider = UserProvider.instantiate(conf);
@ -1451,6 +1455,8 @@ public class HRegionServer extends HasThread implements
startServiceThreads(); startServiceThreads();
startHeapMemoryManager(); startHeapMemoryManager();
// Call it after starting HeapMemoryManager.
initializeMemStoreChunkPool();
LOG.info("Serving as " + this.serverName + LOG.info("Serving as " + this.serverName +
", RpcServer on " + rpcServices.isa + ", RpcServer on " + rpcServices.isa +
", sessionid=0x" + ", sessionid=0x" +
@ -1470,16 +1476,34 @@ public class HRegionServer extends HasThread implements
} }
} }
private void initializeMemStoreChunkPool() {
if (MemStoreLAB.isEnabled(conf)) {
// MSLAB is enabled. So initialize MemStoreChunkPool
// By this time, the MemstoreFlusher is already initialized. We can get the global limits from
// it.
Pair<Long, MemoryType> pair = MemorySizeUtil.getGlobalMemstoreSize(conf);
long globalMemStoreSize = pair.getFirst();
boolean offheap = pair.getSecond() == MemoryType.NON_HEAP;
// When off heap memstore in use, take full area for chunk pool.
float poolSizePercentage = offheap ? 1.0F
: conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT);
float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY,
MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
MemStoreChunkPool pool = MemStoreChunkPool.initialize(globalMemStoreSize, poolSizePercentage,
initialCountPercentage, chunkSize, offheap);
if (pool != null && this.hMemManager != null) {
// Register with Heap Memory manager
this.hMemManager.registerTuneObserver(pool);
}
}
}
private void startHeapMemoryManager() { private void startHeapMemoryManager() {
this.hMemManager = HeapMemoryManager.create(this.conf, this.cacheFlusher, this.hMemManager = HeapMemoryManager.create(this.conf, this.cacheFlusher, this,
this, this.regionServerAccounting); this.regionServerAccounting);
if (this.hMemManager != null) { if (this.hMemManager != null) {
this.hMemManager.start(getChoreService()); this.hMemManager.start(getChoreService());
MemStoreChunkPool chunkPool = MemStoreChunkPool.getPool(this.conf);
if (chunkPool != null) {
// Register it as HeapMemoryTuneObserver
this.hMemManager.registerTuneObserver(chunkPool);
}
} }
} }
@ -3522,11 +3546,6 @@ public class HRegionServer extends HasThread implements
configurationManager.notifyAllObservers(conf); configurationManager.notifyAllObservers(conf);
} }
@Override
public HeapMemoryManager getHeapMemoryManager() {
return hMemManager;
}
@Override @Override
public double getCompactionPressure() { public double getCompactionPressure() {
double max = 0; double max = 0;
@ -3541,6 +3560,11 @@ public class HRegionServer extends HasThread implements
return max; return max;
} }
@Override
public HeapMemoryManager getHeapMemoryManager() {
return hMemManager;
}
/** /**
* For testing * For testing
* @return whether all wal roll request finished for this regionserver * @return whether all wal roll request finished for this regionserver

View File

@ -36,13 +36,15 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
/** /**
* Manages tuning of Heap memory using <code>HeapMemoryTuner</code>. * Manages tuning of Heap memory using <code>HeapMemoryTuner</code>. Most part of the heap memory is
* split between Memstores and BlockCache. This manager helps in tuning sizes of both these
* dynamically, as per the R/W load on the servers.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class HeapMemoryManager { public class HeapMemoryManager {
@ -117,10 +119,10 @@ public class HeapMemoryManager {
private boolean doInit(Configuration conf) { private boolean doInit(Configuration conf) {
boolean tuningEnabled = true; boolean tuningEnabled = true;
globalMemStorePercent = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, false); globalMemStorePercent = MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false);
blockCachePercent = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, blockCachePercent = conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
HeapMemorySizeUtil.checkForClusterFreeMemoryLimit(conf); MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(conf);
// Initialize max and min range for memstore heap space // Initialize max and min range for memstore heap space
globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY, globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY,
globalMemStorePercent); globalMemStorePercent);
@ -128,14 +130,14 @@ public class HeapMemoryManager {
globalMemStorePercent); globalMemStorePercent);
if (globalMemStorePercent < globalMemStorePercentMinRange) { if (globalMemStorePercent < globalMemStorePercentMinRange) {
LOG.warn("Setting " + MEMSTORE_SIZE_MIN_RANGE_KEY + " to " + globalMemStorePercent LOG.warn("Setting " + MEMSTORE_SIZE_MIN_RANGE_KEY + " to " + globalMemStorePercent
+ ", same value as " + HeapMemorySizeUtil.MEMSTORE_SIZE_KEY + ", same value as " + MemorySizeUtil.MEMSTORE_SIZE_KEY
+ " because supplied value greater than initial memstore size value."); + " because supplied value greater than initial memstore size value.");
globalMemStorePercentMinRange = globalMemStorePercent; globalMemStorePercentMinRange = globalMemStorePercent;
conf.setFloat(MEMSTORE_SIZE_MIN_RANGE_KEY, globalMemStorePercentMinRange); conf.setFloat(MEMSTORE_SIZE_MIN_RANGE_KEY, globalMemStorePercentMinRange);
} }
if (globalMemStorePercent > globalMemStorePercentMaxRange) { if (globalMemStorePercent > globalMemStorePercentMaxRange) {
LOG.warn("Setting " + MEMSTORE_SIZE_MAX_RANGE_KEY + " to " + globalMemStorePercent LOG.warn("Setting " + MEMSTORE_SIZE_MAX_RANGE_KEY + " to " + globalMemStorePercent
+ ", same value as " + HeapMemorySizeUtil.MEMSTORE_SIZE_KEY + ", same value as " + MemorySizeUtil.MEMSTORE_SIZE_KEY
+ " because supplied value less than initial memstore size value."); + " because supplied value less than initial memstore size value.");
globalMemStorePercentMaxRange = globalMemStorePercent; globalMemStorePercentMaxRange = globalMemStorePercent;
conf.setFloat(MEMSTORE_SIZE_MAX_RANGE_KEY, globalMemStorePercentMaxRange); conf.setFloat(MEMSTORE_SIZE_MAX_RANGE_KEY, globalMemStorePercentMaxRange);
@ -167,7 +169,7 @@ public class HeapMemoryManager {
} }
int gml = (int) (globalMemStorePercentMaxRange * CONVERT_TO_PERCENTAGE); int gml = (int) (globalMemStorePercentMaxRange * CONVERT_TO_PERCENTAGE);
this.l2BlockCachePercent = HeapMemorySizeUtil.getL2BlockCacheHeapPercent(conf); this.l2BlockCachePercent = MemorySizeUtil.getL2BlockCacheHeapPercent(conf);
int bcul = (int) ((blockCachePercentMinRange + l2BlockCachePercent) * CONVERT_TO_PERCENTAGE); int bcul = (int) ((blockCachePercentMinRange + l2BlockCachePercent) * CONVERT_TO_PERCENTAGE);
if (CONVERT_TO_PERCENTAGE - (gml + bcul) < CLUSTER_MINIMUM_MEMORY_THRESHOLD) { if (CONVERT_TO_PERCENTAGE - (gml + bcul) < CLUSTER_MINIMUM_MEMORY_THRESHOLD) {
throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds " throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds "
@ -340,7 +342,7 @@ public class HeapMemoryManager {
if (CONVERT_TO_PERCENTAGE - (gml + bcul) < CLUSTER_MINIMUM_MEMORY_THRESHOLD) { if (CONVERT_TO_PERCENTAGE - (gml + bcul) < CLUSTER_MINIMUM_MEMORY_THRESHOLD) {
LOG.info("Current heap configuration from HeapMemoryTuner exceeds " LOG.info("Current heap configuration from HeapMemoryTuner exceeds "
+ "the threshold required for successful cluster operation. " + "the threshold required for successful cluster operation. "
+ "The combined value cannot exceed 0.8. " + HeapMemorySizeUtil.MEMSTORE_SIZE_KEY + "The combined value cannot exceed 0.8. " + MemorySizeUtil.MEMSTORE_SIZE_KEY
+ " is " + memstoreSize + " and " + HFILE_BLOCK_CACHE_SIZE_KEY + " is " + " is " + memstoreSize + " and " + HFILE_BLOCK_CACHE_SIZE_KEY + " is "
+ blockCacheSize); + blockCacheSize);
// TODO can adjust the value so as not exceed 80%. Is that correct? may be. // TODO can adjust the value so as not exceed 80%. Is that correct? may be.

View File

@ -18,7 +18,6 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import java.lang.management.ManagementFactory;
import java.util.concurrent.BlockingQueue; import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
@ -29,8 +28,6 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
@ -45,7 +42,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* collection on JVM. * collection on JVM.
* *
* The pool instance is globally unique and could be obtained through * The pool instance is globally unique and could be obtained through
* {@link MemStoreChunkPool#getPool(Configuration)} * {@link MemStoreChunkPool#initialize(long, float, float, int, boolean)}
* *
* {@link MemStoreChunkPool#getChunk()} is called when MemStoreLAB allocating * {@link MemStoreChunkPool#getChunk()} is called when MemStoreLAB allocating
* bytes, and {@link MemStoreChunkPool#putbackChunks(BlockingQueue)} is called * bytes, and {@link MemStoreChunkPool#putbackChunks(BlockingQueue)} is called
@ -55,10 +52,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Private @InterfaceAudience.Private
public class MemStoreChunkPool implements HeapMemoryTuneObserver { public class MemStoreChunkPool implements HeapMemoryTuneObserver {
private static final Log LOG = LogFactory.getLog(MemStoreChunkPool.class); private static final Log LOG = LogFactory.getLog(MemStoreChunkPool.class);
final static String CHUNK_POOL_MAXSIZE_KEY = "hbase.hregion.memstore.chunkpool.maxsize";
final static String CHUNK_POOL_INITIALSIZE_KEY = "hbase.hregion.memstore.chunkpool.initialsize";
final static float POOL_MAX_SIZE_DEFAULT = 1.0f;
final static float POOL_INITIAL_SIZE_DEFAULT = 0.0f;
// Static reference to the MemStoreChunkPool // Static reference to the MemStoreChunkPool
static MemStoreChunkPool GLOBAL_INSTANCE; static MemStoreChunkPool GLOBAL_INSTANCE;
@ -68,7 +61,7 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
private int maxCount; private int maxCount;
// A queue of reclaimed chunks // A queue of reclaimed chunks
private final BlockingQueue<PooledChunk> reclaimedChunks; private final BlockingQueue<Chunk> reclaimedChunks;
private final int chunkSize; private final int chunkSize;
private final float poolSizePercentage; private final float poolSizePercentage;
@ -78,15 +71,17 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
private static final int statThreadPeriod = 60 * 5; private static final int statThreadPeriod = 60 * 5;
private final AtomicLong chunkCount = new AtomicLong(); private final AtomicLong chunkCount = new AtomicLong();
private final AtomicLong reusedChunkCount = new AtomicLong(); private final AtomicLong reusedChunkCount = new AtomicLong();
private final boolean offheap;
MemStoreChunkPool(Configuration conf, int chunkSize, int maxCount, MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float poolSizePercentage,
int initialCount, float poolSizePercentage) { boolean offheap) {
this.maxCount = maxCount; this.maxCount = maxCount;
this.chunkSize = chunkSize; this.chunkSize = chunkSize;
this.poolSizePercentage = poolSizePercentage; this.poolSizePercentage = poolSizePercentage;
this.reclaimedChunks = new LinkedBlockingQueue<PooledChunk>(); this.offheap = offheap;
this.reclaimedChunks = new LinkedBlockingQueue<>();
for (int i = 0; i < initialCount; i++) { for (int i = 0; i < initialCount; i++) {
PooledChunk chunk = new PooledChunk(chunkSize); Chunk chunk = this.offheap ? new OffheapChunk(chunkSize) : new OnheapChunk(chunkSize);
chunk.init(); chunk.init();
reclaimedChunks.add(chunk); reclaimedChunks.add(chunk);
} }
@ -108,8 +103,8 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
* @see #putbackChunk(Chunk) * @see #putbackChunk(Chunk)
* @see #putbackChunks(BlockingQueue) * @see #putbackChunks(BlockingQueue)
*/ */
PooledChunk getChunk() { Chunk getChunk() {
PooledChunk chunk = reclaimedChunks.poll(); Chunk chunk = reclaimedChunks.poll();
if (chunk != null) { if (chunk != null) {
chunk.reset(); chunk.reset();
reusedChunkCount.incrementAndGet(); reusedChunkCount.incrementAndGet();
@ -118,7 +113,7 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
while (true) { while (true) {
long created = this.chunkCount.get(); long created = this.chunkCount.get();
if (created < this.maxCount) { if (created < this.maxCount) {
chunk = new PooledChunk(chunkSize); chunk = this.offheap ? new OffheapChunk(this.chunkSize) : new OnheapChunk(this.chunkSize);
if (this.chunkCount.compareAndSet(created, created + 1)) { if (this.chunkCount.compareAndSet(created, created + 1)) {
break; break;
} }
@ -135,9 +130,9 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
* skip the remaining chunks * skip the remaining chunks
* @param chunks * @param chunks
*/ */
synchronized void putbackChunks(BlockingQueue<PooledChunk> chunks) { synchronized void putbackChunks(BlockingQueue<Chunk> chunks) {
int toAdd = Math.min(chunks.size(), this.maxCount - reclaimedChunks.size()); int toAdd = Math.min(chunks.size(), this.maxCount - reclaimedChunks.size());
PooledChunk chunk = null; Chunk chunk = null;
while ((chunk = chunks.poll()) != null && toAdd > 0) { while ((chunk = chunks.poll()) != null && toAdd > 0) {
reclaimedChunks.add(chunk); reclaimedChunks.add(chunk);
toAdd--; toAdd--;
@ -149,7 +144,7 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
* skip it * skip it
* @param chunk * @param chunk
*/ */
synchronized void putbackChunk(PooledChunk chunk) { synchronized void putbackChunk(Chunk chunk) {
if (reclaimedChunks.size() < this.maxCount) { if (reclaimedChunks.size() < this.maxCount) {
reclaimedChunks.add(chunk); reclaimedChunks.add(chunk);
} }
@ -191,51 +186,41 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
} }
/** /**
* @param conf
* @return the global MemStoreChunkPool instance * @return the global MemStoreChunkPool instance
*/ */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DC_DOUBLECHECK", @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "LI_LAZY_INIT_STATIC",
justification="Intentional") justification = "Method is called by single thread at the starting of RS")
static MemStoreChunkPool getPool(Configuration conf) { static MemStoreChunkPool initialize(long globalMemStoreSize, float poolSizePercentage,
float initialCountPercentage, int chunkSize, boolean offheap) {
if (GLOBAL_INSTANCE != null) return GLOBAL_INSTANCE; if (GLOBAL_INSTANCE != null) return GLOBAL_INSTANCE;
synchronized (MemStoreChunkPool.class) {
if (chunkPoolDisabled) return null; if (chunkPoolDisabled) return null;
if (GLOBAL_INSTANCE != null) return GLOBAL_INSTANCE;
// When MSLAB is turned OFF no need to init chunk pool at all.
if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
chunkPoolDisabled = true;
return null;
}
float poolSizePercentage = conf.getFloat(CHUNK_POOL_MAXSIZE_KEY, POOL_MAX_SIZE_DEFAULT);
if (poolSizePercentage <= 0) { if (poolSizePercentage <= 0) {
chunkPoolDisabled = true; chunkPoolDisabled = true;
return null; return null;
} }
if (poolSizePercentage > 1.0) { if (poolSizePercentage > 1.0) {
throw new IllegalArgumentException(CHUNK_POOL_MAXSIZE_KEY + " must be between 0.0 and 1.0"); throw new IllegalArgumentException(
MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY + " must be between 0.0 and 1.0");
} }
long heapMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax(); int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize);
long globalMemStoreLimit = (long) (heapMax * HeapMemorySizeUtil.getGlobalMemStorePercent(conf,
false));
int chunkSize = conf.getInt(HeapMemStoreLAB.CHUNK_SIZE_KEY,
HeapMemStoreLAB.CHUNK_SIZE_DEFAULT);
int maxCount = (int) (globalMemStoreLimit * poolSizePercentage / chunkSize);
float initialCountPercentage = conf.getFloat(CHUNK_POOL_INITIALSIZE_KEY,
POOL_INITIAL_SIZE_DEFAULT);
if (initialCountPercentage > 1.0 || initialCountPercentage < 0) { if (initialCountPercentage > 1.0 || initialCountPercentage < 0) {
throw new IllegalArgumentException(CHUNK_POOL_INITIALSIZE_KEY throw new IllegalArgumentException(
+ " must be between 0.0 and 1.0"); MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 and 1.0");
} }
int initialCount = (int) (initialCountPercentage * maxCount); int initialCount = (int) (initialCountPercentage * maxCount);
LOG.info("Allocating MemStoreChunkPool with chunk size " + StringUtils.byteDesc(chunkSize) LOG.info("Allocating MemStoreChunkPool with chunk size " + StringUtils.byteDesc(chunkSize)
+ ", max count " + maxCount + ", initial count " + initialCount); + ", max count " + maxCount + ", initial count " + initialCount);
GLOBAL_INSTANCE = new MemStoreChunkPool(conf, chunkSize, maxCount, initialCount, GLOBAL_INSTANCE = new MemStoreChunkPool(chunkSize, maxCount, initialCount, poolSizePercentage,
poolSizePercentage); offheap);
return GLOBAL_INSTANCE; return GLOBAL_INSTANCE;
} }
/**
* @return The singleton instance of this pool.
*/
static MemStoreChunkPool getPool() {
return GLOBAL_INSTANCE;
} }
int getMaxCount() { int getMaxCount() {
@ -247,12 +232,6 @@ public class MemStoreChunkPool implements HeapMemoryTuneObserver {
chunkPoolDisabled = false; chunkPoolDisabled = false;
} }
public static class PooledChunk extends Chunk {
PooledChunk(int size) {
super(size);
}
}
@Override @Override
public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) { public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) {
int newMaxCount = (int) (newMemstoreSize * poolSizePercentage / chunkSize); int newMaxCount = (int) (newMemstoreSize * poolSizePercentage / chunkSize);

View File

@ -24,7 +24,7 @@ import com.google.common.base.Preconditions;
import java.io.IOException; import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler; import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.management.ManagementFactory; import java.lang.management.MemoryType;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.ConcurrentModificationException; import java.util.ConcurrentModificationException;
import java.util.HashMap; import java.util.HashMap;
@ -49,11 +49,12 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.Region.FlushResult; import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
@ -110,11 +111,20 @@ class MemStoreFlusher implements FlushRequester {
this.server = server; this.server = server;
this.threadWakeFrequency = this.threadWakeFrequency =
conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax(); Pair<Long, MemoryType> pair = MemorySizeUtil.getGlobalMemstoreSize(conf);
float globalMemStorePercent = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, true); this.globalMemStoreLimit = pair.getFirst();
this.globalMemStoreLimit = (long) (max * globalMemStorePercent); boolean onheap = pair.getSecond() == MemoryType.HEAP;
this.globalMemStoreLimitLowMarkPercent = // When off heap memstore in use we configure the global off heap space for memstore as bytes
HeapMemorySizeUtil.getGlobalMemStoreLowerMark(conf, globalMemStorePercent); // not as % of max memory size. In such case, the lower water mark should be specified using the
// key "hbase.regionserver.global.memstore.size.lower.limit" which says % of the global upper
// bound and defaults to 95%. In on heap case also specifying this way is ideal. But in the past
// we used to take lower bound also as the % of xmx (38% as default). For backward compatibility
// for this deprecated config,we will fall back to read that config when new one is missing.
// Only for on heap case, do this fallback mechanism. For off heap it makes no sense.
// TODO When to get rid of the deprecated config? ie
// "hbase.regionserver.global.memstore.lowerLimit". Can get rid of this boolean passing then.
this.globalMemStoreLimitLowMarkPercent = MemorySizeUtil.getGlobalMemStoreHeapLowerMark(conf,
onheap);
this.globalMemStoreLimitLowMark = this.globalMemStoreLimitLowMark =
(long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent); (long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent);
@ -126,7 +136,7 @@ class MemStoreFlusher implements FlushRequester {
+ TraditionalBinaryPrefix.long2String(this.globalMemStoreLimit, "", 1) + TraditionalBinaryPrefix.long2String(this.globalMemStoreLimit, "", 1)
+ ", globalMemStoreLimitLowMark=" + ", globalMemStoreLimitLowMark="
+ TraditionalBinaryPrefix.long2String(this.globalMemStoreLimitLowMark, "", 1) + TraditionalBinaryPrefix.long2String(this.globalMemStoreLimitLowMark, "", 1)
+ ", maxHeap=" + TraditionalBinaryPrefix.long2String(max, "", 1)); + ", Offheap=" + !onheap);
} }
public LongAdder getUpdatesBlockedMsHighWater() { public LongAdder getUpdatesBlockedMsHighWater() {

View File

@ -17,8 +17,10 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.ReflectionUtils;
/** /**
* A memstore-local allocation buffer. * A memstore-local allocation buffer.
@ -46,6 +48,19 @@ public interface MemStoreLAB {
String USEMSLAB_KEY = "hbase.hregion.memstore.mslab.enabled"; String USEMSLAB_KEY = "hbase.hregion.memstore.mslab.enabled";
boolean USEMSLAB_DEFAULT = true; boolean USEMSLAB_DEFAULT = true;
String MSLAB_CLASS_NAME = "hbase.regionserver.mslab.class";
String CHUNK_SIZE_KEY = "hbase.hregion.memstore.mslab.chunksize";
int CHUNK_SIZE_DEFAULT = 2048 * 1024;
String MAX_ALLOC_KEY = "hbase.hregion.memstore.mslab.max.allocation";
int MAX_ALLOC_DEFAULT = 256 * 1024; // allocs bigger than this don't go through
// allocator
// MSLAB pool related configs
String CHUNK_POOL_MAXSIZE_KEY = "hbase.hregion.memstore.chunkpool.maxsize";
String CHUNK_POOL_INITIALSIZE_KEY = "hbase.hregion.memstore.chunkpool.initialsize";
float POOL_MAX_SIZE_DEFAULT = 1.0f;
float POOL_INITIAL_SIZE_DEFAULT = 0.0f;
/** /**
* Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance
@ -68,4 +83,17 @@ public interface MemStoreLAB {
*/ */
void decScannerCount(); void decScannerCount();
public static MemStoreLAB newInstance(Configuration conf) {
MemStoreLAB memStoreLAB = null;
if (isEnabled(conf)) {
String className = conf.get(MSLAB_CLASS_NAME, MemStoreLABImpl.class.getName());
memStoreLAB = ReflectionUtils.instantiateWithCustomCtor(className,
new Class[] { Configuration.class }, new Object[] { conf });
}
return memStoreLAB;
}
public static boolean isEnabled(Configuration conf) {
return conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT);
}
} }

View File

@ -28,9 +28,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.MemStoreChunkPool.PooledChunk;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
@ -43,7 +43,7 @@ import com.google.common.base.Preconditions;
* slices into the array. * slices into the array.
* <p> * <p>
* The purpose of this class is to combat heap fragmentation in the * The purpose of this class is to combat heap fragmentation in the
* regionserver. By ensuring that all KeyValues in a given memstore refer * regionserver. By ensuring that all Cells in a given memstore refer
* only to large chunks of contiguous memory, we ensure that large blocks * only to large chunks of contiguous memory, we ensure that large blocks
* get freed up when the memstore is flushed. * get freed up when the memstore is flushed.
* <p> * <p>
@ -54,25 +54,23 @@ import com.google.common.base.Preconditions;
* TODO: we should probably benchmark whether word-aligning the allocations * TODO: we should probably benchmark whether word-aligning the allocations
* would provide a performance improvement - probably would speed up the * would provide a performance improvement - probably would speed up the
* Bytes.toLong/Bytes.toInt calls in KeyValue, but some of those are cached * Bytes.toLong/Bytes.toInt calls in KeyValue, but some of those are cached
* anyway * anyway.
* The chunks created by this MemStoreLAB can get pooled at {@link MemStoreChunkPool}.
* When the Chunk comes pool, it can be either an on heap or an off heap backed chunk. The chunks,
* which this MemStoreLAB creates on its own (when no chunk available from pool), those will be
* always on heap backed.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class HeapMemStoreLAB implements MemStoreLAB { public class MemStoreLABImpl implements MemStoreLAB {
static final String CHUNK_SIZE_KEY = "hbase.hregion.memstore.mslab.chunksize"; static final Log LOG = LogFactory.getLog(MemStoreLABImpl.class);
static final int CHUNK_SIZE_DEFAULT = 2048 * 1024;
static final String MAX_ALLOC_KEY = "hbase.hregion.memstore.mslab.max.allocation";
static final int MAX_ALLOC_DEFAULT = 256 * 1024; // allocs bigger than this don't go through
// allocator
static final Log LOG = LogFactory.getLog(HeapMemStoreLAB.class);
private AtomicReference<Chunk> curChunk = new AtomicReference<Chunk>(); private AtomicReference<Chunk> curChunk = new AtomicReference<Chunk>();
// A queue of chunks from pool contained by this memstore LAB // A queue of chunks from pool contained by this memstore LAB
// TODO: in the future, it would be better to have List implementation instead of Queue, // TODO: in the future, it would be better to have List implementation instead of Queue,
// as FIFO order is not so important here // as FIFO order is not so important here
@VisibleForTesting @VisibleForTesting
BlockingQueue<PooledChunk> pooledChunkQueue = null; BlockingQueue<Chunk> pooledChunkQueue = null;
private final int chunkSize; private final int chunkSize;
private final int maxAlloc; private final int maxAlloc;
private final MemStoreChunkPool chunkPool; private final MemStoreChunkPool chunkPool;
@ -87,19 +85,19 @@ public class HeapMemStoreLAB implements MemStoreLAB {
private final AtomicInteger openScannerCount = new AtomicInteger(); private final AtomicInteger openScannerCount = new AtomicInteger();
// Used in testing // Used in testing
public HeapMemStoreLAB() { public MemStoreLABImpl() {
this(new Configuration()); this(new Configuration());
} }
public HeapMemStoreLAB(Configuration conf) { public MemStoreLABImpl(Configuration conf) {
chunkSize = conf.getInt(CHUNK_SIZE_KEY, CHUNK_SIZE_DEFAULT); chunkSize = conf.getInt(CHUNK_SIZE_KEY, CHUNK_SIZE_DEFAULT);
maxAlloc = conf.getInt(MAX_ALLOC_KEY, MAX_ALLOC_DEFAULT); maxAlloc = conf.getInt(MAX_ALLOC_KEY, MAX_ALLOC_DEFAULT);
this.chunkPool = MemStoreChunkPool.getPool(conf); this.chunkPool = MemStoreChunkPool.getPool();
// currently chunkQueue is only used for chunkPool // currently chunkQueue is only used for chunkPool
if (this.chunkPool != null) { if (this.chunkPool != null) {
// set queue length to chunk pool max count to avoid keeping reference of // set queue length to chunk pool max count to avoid keeping reference of
// too many non-reclaimable chunks // too many non-reclaimable chunks
pooledChunkQueue = new LinkedBlockingQueue<PooledChunk>(chunkPool.getMaxCount()); pooledChunkQueue = new LinkedBlockingQueue<>(chunkPool.getMaxCount());
} }
// if we don't exclude allocations >CHUNK_SIZE, we'd infiniteloop on one! // if we don't exclude allocations >CHUNK_SIZE, we'd infiniteloop on one!
@ -132,7 +130,7 @@ public class HeapMemStoreLAB implements MemStoreLAB {
// try to retire this chunk // try to retire this chunk
tryRetireChunk(c); tryRetireChunk(c);
} }
return KeyValueUtil.copyCellTo(cell, c.getData(), allocOffset, size); return CellUtil.copyCellTo(cell, c.getData(), allocOffset, size);
} }
/** /**
@ -210,14 +208,14 @@ public class HeapMemStoreLAB implements MemStoreLAB {
// This is chunk from pool // This is chunk from pool
pooledChunk = true; pooledChunk = true;
} else { } else {
c = new Chunk(chunkSize); c = new OnheapChunk(chunkSize);// When chunk is not from pool, always make it as on heap.
} }
if (curChunk.compareAndSet(null, c)) { if (curChunk.compareAndSet(null, c)) {
// we won race - now we need to actually do the expensive // we won race - now we need to actually do the expensive
// allocation step // allocation step
c.init(); c.init();
if (pooledChunk) { if (pooledChunk) {
if (!this.closed && !this.pooledChunkQueue.offer((PooledChunk) c)) { if (!this.closed && !this.pooledChunkQueue.offer(c)) {
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("Chunk queue is full, won't reuse this new chunk. Current queue size: " LOG.trace("Chunk queue is full, won't reuse this new chunk. Current queue size: "
+ pooledChunkQueue.size()); + pooledChunkQueue.size());
@ -226,7 +224,7 @@ public class HeapMemStoreLAB implements MemStoreLAB {
} }
return c; return c;
} else if (pooledChunk) { } else if (pooledChunk) {
chunkPool.putbackChunk((PooledChunk) c); chunkPool.putbackChunk(c);
} }
// someone else won race - that's fine, we'll try to grab theirs // someone else won race - that's fine, we'll try to grab theirs
// in the next iteration of the loop. // in the next iteration of the loop.
@ -239,7 +237,7 @@ public class HeapMemStoreLAB implements MemStoreLAB {
} }
BlockingQueue<PooledChunk> getPooledChunks() { BlockingQueue<Chunk> getPooledChunks() {
return this.pooledChunkQueue; return this.pooledChunkQueue;
} }
} }

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.google.common.base.Preconditions;
/**
* An off heap chunk implementation.
*/
@InterfaceAudience.Private
public class OffheapChunk extends Chunk {
OffheapChunk(int size) {
super(size);
}
@Override
public void init() {
assert nextFreeOffset.get() == UNINITIALIZED;
try {
if (data == null) {
data = ByteBuffer.allocateDirect(this.size);
}
} catch (OutOfMemoryError e) {
boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
assert failInit; // should be true.
throw e;
}
// Mark that it's ready for use
boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, 0);
// We should always succeed the above CAS since only one thread
// calls init()!
Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
}
}

View File

@ -0,0 +1,53 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.google.common.base.Preconditions;
/**
* An on heap chunk implementation.
*/
@InterfaceAudience.Private
public class OnheapChunk extends Chunk {
OnheapChunk(int size) {
super(size);
}
public void init() {
assert nextFreeOffset.get() == UNINITIALIZED;
try {
if (data == null) {
data = ByteBuffer.allocate(this.size);
}
} catch (OutOfMemoryError e) {
boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
assert failInit; // should be true.
throw e;
}
// Mark that it's ready for use
boolean initted = nextFreeOffset.compareAndSet(UNINITIALIZED, 0);
// We should always succeed the above CAS since only one thread
// calls init()!
Preconditions.checkState(initted, "Multiple threads tried to init same chunk");
}
}

View File

@ -45,7 +45,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ByteBufferedCell; import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellScanner;
@ -1174,8 +1174,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
// Since byte buffers can point all kinds of crazy places it's harder to keep track // Since byte buffers can point all kinds of crazy places it's harder to keep track
// of which blocks are kept alive by what byte buffer. // of which blocks are kept alive by what byte buffer.
// So we make a guess. // So we make a guess.
if (c instanceof ByteBufferedCell) { if (c instanceof ByteBufferCell) {
ByteBufferedCell bbCell = (ByteBufferedCell) c; ByteBufferCell bbCell = (ByteBufferCell) c;
ByteBuffer bb = bbCell.getValueByteBuffer(); ByteBuffer bb = bbCell.getValueByteBuffer();
if (bb != lastBlock) { if (bb != lastBlock) {
context.incrementResponseBlockSize(bb.capacity()); context.incrementResponseBlockSize(bb.capacity());

View File

@ -22,7 +22,6 @@ import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -35,8 +34,6 @@ import java.util.List;
@InterfaceAudience.Private @InterfaceAudience.Private
public final class SegmentFactory { public final class SegmentFactory {
static final String MSLAB_CLASS_NAME = "hbase.regionserver.mslab.class";
private SegmentFactory() {} private SegmentFactory() {}
private static SegmentFactory instance = new SegmentFactory(); private static SegmentFactory instance = new SegmentFactory();
@ -47,7 +44,7 @@ public final class SegmentFactory {
// create skip-list-based (non-flat) immutable segment from compacting old immutable segments // create skip-list-based (non-flat) immutable segment from compacting old immutable segments
public ImmutableSegment createImmutableSegment(final Configuration conf, public ImmutableSegment createImmutableSegment(final Configuration conf,
final CellComparator comparator, MemStoreSegmentsIterator iterator) { final CellComparator comparator, MemStoreSegmentsIterator iterator) {
return new ImmutableSegment(comparator, iterator, getMemStoreLAB(conf)); return new ImmutableSegment(comparator, iterator, MemStoreLAB.newInstance(conf));
} }
// create new flat immutable segment from compacting old immutable segments // create new flat immutable segment from compacting old immutable segments
@ -57,7 +54,7 @@ public final class SegmentFactory {
throws IOException { throws IOException {
Preconditions.checkArgument(segmentType == ImmutableSegment.Type.ARRAY_MAP_BASED, Preconditions.checkArgument(segmentType == ImmutableSegment.Type.ARRAY_MAP_BASED,
"wrong immutable segment type"); "wrong immutable segment type");
MemStoreLAB memStoreLAB = getMemStoreLAB(conf); MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf);
return return
// the last parameter "false" means not to merge, but to compact the pipeline // the last parameter "false" means not to merge, but to compact the pipeline
// in order to create the new segment // in order to create the new segment
@ -77,7 +74,7 @@ public final class SegmentFactory {
// create mutable segment // create mutable segment
public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator) { public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator) {
MemStoreLAB memStoreLAB = getMemStoreLAB(conf); MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf);
return generateMutableSegment(conf, comparator, memStoreLAB); return generateMutableSegment(conf, comparator, memStoreLAB);
} }
@ -103,16 +100,6 @@ public final class SegmentFactory {
return new MutableSegment(set, comparator, memStoreLAB); return new MutableSegment(set, comparator, memStoreLAB);
} }
private MemStoreLAB getMemStoreLAB(Configuration conf) {
MemStoreLAB memStoreLAB = null;
if (conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
memStoreLAB = ReflectionUtils.instantiateWithCustomCtor(className,
new Class[] { Configuration.class }, new Object[] { conf });
}
return memStoreLAB;
}
private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) { private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) {
List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>(); List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>();
for (ImmutableSegment segment : segments) { for (ImmutableSegment segment : segments) {

View File

@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils; import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.DrainBarrier; import org.apache.hadoop.hbase.util.DrainBarrier;
@ -386,8 +386,7 @@ public abstract class AbstractFSWAL<W> implements WAL {
this.logrollsize = (long) (blocksize this.logrollsize = (long) (blocksize
* conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f)); * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f));
float memstoreRatio = conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, conf.getFloat( float memstoreRatio = MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false);
HeapMemorySizeUtil.MEMSTORE_SIZE_OLD_KEY, HeapMemorySizeUtil.DEFAULT_MEMSTORE_SIZE));
boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null; boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null;
if (maxLogsDefined) { if (maxLogsDefined) {
LOG.warn("'hbase.regionserver.maxlogs' was deprecated."); LOG.warn("'hbase.regionserver.maxlogs' was deprecated.");

View File

@ -27,7 +27,7 @@ import java.util.regex.Pattern;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TestCellUtil.ByteBufferedCellImpl; import org.apache.hadoop.hbase.TestCellUtil.ByteBufferCellImpl;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.FilterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -108,7 +108,7 @@ public class TestSingleColumnValueFilter {
assertTrue("less than", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW); assertTrue("less than", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
filter.reset(); filter.reset();
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("less than", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW); assertTrue("less than", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW);
filter.reset(); filter.reset();
@ -117,7 +117,7 @@ public class TestSingleColumnValueFilter {
assertTrue("Equals 100", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW); assertTrue("Equals 100", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
filter.reset(); filter.reset();
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("Equals 100", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW); assertTrue("Equals 100", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW);
filter.reset(); filter.reset();
@ -126,7 +126,7 @@ public class TestSingleColumnValueFilter {
assertTrue("include 120", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("include 120", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
filter.reset(); filter.reset();
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("include 120", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("include 120", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
} }
@ -135,29 +135,29 @@ public class TestSingleColumnValueFilter {
KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2); KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
assertTrue("basicFilter1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter1", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter1", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_3); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_3);
assertTrue("basicFilter2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter2", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter2", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_4); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_4);
assertTrue("basicFilter3", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter3", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter3", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter3", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("basicFilterNotNull", filter.filterRow()); assertFalse("basicFilterNotNull", filter.filterRow());
filter.reset(); filter.reset();
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1);
assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW); assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter4", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW); assertTrue("basicFilter4", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW); assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter4", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW); assertTrue("basicFilter4", filter.filterKeyValue(c) == Filter.ReturnCode.NEXT_ROW);
assertFalse("basicFilterAllRemaining", filter.filterAllRemaining()); assertFalse("basicFilterAllRemaining", filter.filterAllRemaining());
assertTrue("basicFilterNotNull", filter.filterRow()); assertTrue("basicFilterNotNull", filter.filterRow());
@ -166,12 +166,12 @@ public class TestSingleColumnValueFilter {
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1);
assertTrue("basicFilter5", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter5", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter5", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter5", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2); kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
assertTrue("basicFilter5", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter5", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("basicFilter5", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("basicFilter5", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("basicFilterNotNull", filter.filterRow()); assertFalse("basicFilterNotNull", filter.filterRow());
} }
@ -181,14 +181,14 @@ public class TestSingleColumnValueFilter {
KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1);
assertTrue("null1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("null1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("null1", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("null1", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("null1FilterRow", filter.filterRow()); assertFalse("null1FilterRow", filter.filterRow());
filter.reset(); filter.reset();
kv = new KeyValue(ROW, COLUMN_FAMILY, Bytes.toBytes("qual2"), FULLSTRING_2); kv = new KeyValue(ROW, COLUMN_FAMILY, Bytes.toBytes("qual2"), FULLSTRING_2);
assertTrue("null2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("null2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("null2", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("null2", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertTrue("null2FilterRow", filter.filterRow()); assertTrue("null2FilterRow", filter.filterRow());
} }
@ -200,13 +200,13 @@ public class TestSingleColumnValueFilter {
assertTrue("substrTrue", assertTrue("substrTrue",
filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("substrTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("substrTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER,
FULLSTRING_2); FULLSTRING_2);
assertTrue("substrFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("substrFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("substrFalse", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("substrFalse", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("substrFilterAllRemaining", filter.filterAllRemaining()); assertFalse("substrFilterAllRemaining", filter.filterAllRemaining());
assertFalse("substrFilterNotNull", filter.filterRow()); assertFalse("substrFilterNotNull", filter.filterRow());
@ -219,13 +219,13 @@ public class TestSingleColumnValueFilter {
assertTrue("regexTrue", assertTrue("regexTrue",
filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("regexTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("regexTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER,
FULLSTRING_2); FULLSTRING_2);
assertTrue("regexFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); assertTrue("regexFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
buffer = kv.getBuffer(); buffer = kv.getBuffer();
c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("regexFalse", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("regexFalse", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("regexFilterAllRemaining", filter.filterAllRemaining()); assertFalse("regexFilterAllRemaining", filter.filterAllRemaining());
assertFalse("regexFilterNotNull", filter.filterRow()); assertFalse("regexFilterNotNull", filter.filterRow());
@ -238,7 +238,7 @@ public class TestSingleColumnValueFilter {
assertTrue("regexTrue", assertTrue("regexTrue",
filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE); filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
byte[] buffer = kv.getBuffer(); byte[] buffer = kv.getBuffer();
Cell c = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length); Cell c = new ByteBufferCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
assertTrue("regexTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE); assertTrue("regexTrue", filter.filterKeyValue(c) == Filter.ReturnCode.INCLUDE);
assertFalse("regexFilterAllRemaining", filter.filterAllRemaining()); assertFalse("regexFilterAllRemaining", filter.filterAllRemaining());
assertFalse("regexFilterNotNull", filter.filterRow()); assertFalse("regexFilterNotNull", filter.filterRow());

View File

@ -44,7 +44,6 @@ public class TestCellFlatSet extends TestCase {
private Cell descCells[]; private Cell descCells[];
private CellArrayMap descCbOnHeap; private CellArrayMap descCbOnHeap;
private final static Configuration CONF = new Configuration(); private final static Configuration CONF = new Configuration();
private HeapMemStoreLAB mslab;
private KeyValue lowerOuterCell; private KeyValue lowerOuterCell;
private KeyValue upperOuterCell; private KeyValue upperOuterCell;
@ -73,9 +72,8 @@ public class TestCellFlatSet extends TestCase {
descCells = new Cell[] {kv4,kv3,kv2,kv1}; descCells = new Cell[] {kv4,kv3,kv2,kv1};
descCbOnHeap = new CellArrayMap(CellComparator.COMPARATOR,descCells,0,NUM_OF_CELLS,true); descCbOnHeap = new CellArrayMap(CellComparator.COMPARATOR,descCells,0,NUM_OF_CELLS,true);
CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
CONF.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f); CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
MemStoreChunkPool.chunkPoolDisabled = false; MemStoreChunkPool.chunkPoolDisabled = false;
mslab = new HeapMemStoreLAB(CONF);
} }
/* Create and test CellSet based on CellArrayMap */ /* Create and test CellSet based on CellArrayMap */

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import java.io.IOException; import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -87,7 +89,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
super.internalSetUp(); super.internalSetUp();
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f); conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 1000); conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 1000);
HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf); HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
@ -95,7 +97,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
this.regionServicesForStores = region.getRegionServicesForStores(); this.regionServicesForStores = region.getRegionServicesForStores();
this.store = new HStore(region, hcd, conf); this.store = new HStore(region, hcd, conf);
chunkPool = MemStoreChunkPool.getPool(conf); long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
.getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
chunkPool = MemStoreChunkPool.initialize(globalMemStoreLimit, 0.2f,
MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false);
assertTrue(chunkPool != null); assertTrue(chunkPool != null);
} }

View File

@ -132,9 +132,9 @@ public class TestDefaultMemStore {
// make sure memstore size increased even when writing the same cell, if using MSLAB // make sure memstore size increased even when writing the same cell, if using MSLAB
assertEquals(Segment.getCellLength(kv), sizeChangeForSecondCell.getDataSize()); assertEquals(Segment.getCellLength(kv), sizeChangeForSecondCell.getDataSize());
// make sure chunk size increased even when writing the same cell, if using MSLAB // make sure chunk size increased even when writing the same cell, if using MSLAB
if (msLab instanceof HeapMemStoreLAB) { if (msLab instanceof MemStoreLABImpl) {
assertEquals(2 * Segment.getCellLength(kv), assertEquals(2 * Segment.getCellLength(kv),
((HeapMemStoreLAB) msLab).getCurrentChunk().getNextFreeOffset()); ((MemStoreLABImpl) msLab).getCurrentChunk().getNextFreeOffset());
} }
} else { } else {
// make sure no memstore size change w/o MSLAB // make sure no memstore size change w/o MSLAB

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CachedBlock; import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@ -62,7 +62,7 @@ public class TestHeapMemoryManager {
@Test @Test
public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception { public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception {
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f);
HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0), HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0),
@ -228,7 +228,7 @@ public class TestHeapMemoryManager {
blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8)); blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8));
regionServerAccounting.setTestMemstoreSize(0); regionServerAccounting.setTestMemstoreSize(0);
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_LOWER_LIMIT_KEY, 0.7f); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_LOWER_LIMIT_KEY, 0.7f);
conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
@ -462,7 +462,7 @@ public class TestHeapMemoryManager {
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.1f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.1f);
conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.4F); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.4F);
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.3F); conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.3F);
conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0.1F); conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0.1F);
conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "heap"); conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "heap");

View File

@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -32,6 +33,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import java.io.IOException; import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
@ -50,10 +52,13 @@ public class TestMemStoreChunkPool {
@BeforeClass @BeforeClass
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f); conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
chunkPoolDisabledBeforeTest = MemStoreChunkPool.chunkPoolDisabled; chunkPoolDisabledBeforeTest = MemStoreChunkPool.chunkPoolDisabled;
MemStoreChunkPool.chunkPoolDisabled = false; MemStoreChunkPool.chunkPoolDisabled = false;
chunkPool = MemStoreChunkPool.getPool(conf); long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
.getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
chunkPool = MemStoreChunkPool.initialize(globalMemStoreLimit, 0.2f,
MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false);
assertTrue(chunkPool != null); assertTrue(chunkPool != null);
} }
@ -70,7 +75,7 @@ public class TestMemStoreChunkPool {
@Test @Test
public void testReusingChunks() { public void testReusingChunks() {
Random rand = new Random(); Random rand = new Random();
MemStoreLAB mslab = new HeapMemStoreLAB(conf); MemStoreLAB mslab = new MemStoreLABImpl(conf);
int expectedOff = 0; int expectedOff = 0;
byte[] lastBuffer = null; byte[] lastBuffer = null;
final byte[] rk = Bytes.toBytes("r1"); final byte[] rk = Bytes.toBytes("r1");
@ -96,7 +101,7 @@ public class TestMemStoreChunkPool {
int chunkCount = chunkPool.getPoolSize(); int chunkCount = chunkPool.getPoolSize();
assertTrue(chunkCount > 0); assertTrue(chunkCount > 0);
// reconstruct mslab // reconstruct mslab
mslab = new HeapMemStoreLAB(conf); mslab = new MemStoreLABImpl(conf);
// chunk should be got from the pool, so we can reuse it. // chunk should be got from the pool, so we can reuse it.
KeyValue kv = new KeyValue(rk, cf, q, new byte[10]); KeyValue kv = new KeyValue(rk, cf, q, new byte[10]);
mslab.copyCellInto(kv); mslab.copyCellInto(kv);
@ -209,7 +214,7 @@ public class TestMemStoreChunkPool {
final int initialCount = 5; final int initialCount = 5;
final int chunkSize = 30; final int chunkSize = 30;
final int valSize = 7; final int valSize = 7;
MemStoreChunkPool pool = new MemStoreChunkPool(conf, chunkSize, maxCount, initialCount, 1); MemStoreChunkPool pool = new MemStoreChunkPool(chunkSize, maxCount, initialCount, 1, false);
assertEquals(initialCount, pool.getPoolSize()); assertEquals(initialCount, pool.getPoolSize());
assertEquals(maxCount, pool.getMaxCount()); assertEquals(maxCount, pool.getMaxCount());
MemStoreChunkPool.GLOBAL_INSTANCE = pool;// Replace the global ref with the new one we created. MemStoreChunkPool.GLOBAL_INSTANCE = pool;// Replace the global ref with the new one we created.
@ -221,7 +226,7 @@ public class TestMemStoreChunkPool {
Runnable r = new Runnable() { Runnable r = new Runnable() {
@Override @Override
public void run() { public void run() {
MemStoreLAB memStoreLAB = new HeapMemStoreLAB(conf); MemStoreLAB memStoreLAB = new MemStoreLABImpl(conf);
for (int i = 0; i < maxCount; i++) { for (int i = 0; i < maxCount; i++) {
memStoreLAB.copyCellInto(kv);// Try allocate size = chunkSize. Means every memStoreLAB.copyCellInto(kv);// Try allocate size = chunkSize. Means every
// allocate call will result in a new chunk // allocate call will result in a new chunk

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import java.lang.management.ManagementFactory;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -33,9 +34,11 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import com.google.common.collect.Iterables; import com.google.common.collect.Iterables;
@ -48,17 +51,27 @@ import org.junit.experimental.categories.Category;
@Category({RegionServerTests.class, SmallTests.class}) @Category({RegionServerTests.class, SmallTests.class})
public class TestMemStoreLAB { public class TestMemStoreLAB {
private final static Configuration conf = new Configuration();
private static final byte[] rk = Bytes.toBytes("r1"); private static final byte[] rk = Bytes.toBytes("r1");
private static final byte[] cf = Bytes.toBytes("f"); private static final byte[] cf = Bytes.toBytes("f");
private static final byte[] q = Bytes.toBytes("q"); private static final byte[] q = Bytes.toBytes("q");
@BeforeClass
public static void setUpBeforeClass() throws Exception {
long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
.getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
MemStoreChunkPool.initialize(globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT,
MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false);
}
/** /**
* Test a bunch of random allocations * Test a bunch of random allocations
*/ */
@Test @Test
public void testLABRandomAllocation() { public void testLABRandomAllocation() {
Random rand = new Random(); Random rand = new Random();
MemStoreLAB mslab = new HeapMemStoreLAB(); MemStoreLAB mslab = new MemStoreLABImpl();
int expectedOff = 0; int expectedOff = 0;
byte[] lastBuffer = null; byte[] lastBuffer = null;
// 100K iterations by 0-1K alloc -> 50MB expected // 100K iterations by 0-1K alloc -> 50MB expected
@ -82,7 +95,7 @@ public class TestMemStoreLAB {
@Test @Test
public void testLABLargeAllocation() { public void testLABLargeAllocation() {
MemStoreLAB mslab = new HeapMemStoreLAB(); MemStoreLAB mslab = new MemStoreLABImpl();
KeyValue kv = new KeyValue(rk, cf, q, new byte[2 * 1024 * 1024]); KeyValue kv = new KeyValue(rk, cf, q, new byte[2 * 1024 * 1024]);
Cell newCell = mslab.copyCellInto(kv); Cell newCell = mslab.copyCellInto(kv);
assertNull("2MB allocation shouldn't be satisfied by LAB.", newCell); assertNull("2MB allocation shouldn't be satisfied by LAB.", newCell);
@ -100,7 +113,7 @@ public class TestMemStoreLAB {
final AtomicInteger totalAllocated = new AtomicInteger(); final AtomicInteger totalAllocated = new AtomicInteger();
final MemStoreLAB mslab = new HeapMemStoreLAB(); final MemStoreLAB mslab = new MemStoreLABImpl();
List<List<AllocRecord>> allocations = Lists.newArrayList(); List<List<AllocRecord>> allocations = Lists.newArrayList();
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
@ -170,21 +183,21 @@ public class TestMemStoreLAB {
*/ */
@Test @Test
public void testLABChunkQueue() throws Exception { public void testLABChunkQueue() throws Exception {
HeapMemStoreLAB mslab = new HeapMemStoreLAB(); MemStoreLABImpl mslab = new MemStoreLABImpl();
// by default setting, there should be no chunks initialized in the pool // by default setting, there should be no chunks initialized in the pool
assertTrue(mslab.getPooledChunks().isEmpty()); assertTrue(mslab.getPooledChunks().isEmpty());
// reset mslab with chunk pool // reset mslab with chunk pool
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
conf.setDouble(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.1); conf.setDouble(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.1);
// set chunk size to default max alloc size, so we could easily trigger chunk retirement // set chunk size to default max alloc size, so we could easily trigger chunk retirement
conf.setLong(HeapMemStoreLAB.CHUNK_SIZE_KEY, HeapMemStoreLAB.MAX_ALLOC_DEFAULT); conf.setLong(MemStoreLABImpl.CHUNK_SIZE_KEY, MemStoreLABImpl.MAX_ALLOC_DEFAULT);
// reconstruct mslab // reconstruct mslab
MemStoreChunkPool.clearDisableFlag(); MemStoreChunkPool.clearDisableFlag();
mslab = new HeapMemStoreLAB(conf); mslab = new MemStoreLABImpl(conf);
// launch multiple threads to trigger frequent chunk retirement // launch multiple threads to trigger frequent chunk retirement
List<Thread> threads = new ArrayList<Thread>(); List<Thread> threads = new ArrayList<Thread>();
final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"),
new byte[HeapMemStoreLAB.MAX_ALLOC_DEFAULT - 24]); new byte[MemStoreLABImpl.MAX_ALLOC_DEFAULT - 24]);
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
threads.add(getChunkQueueTestThread(mslab, "testLABChunkQueue-" + i, kv)); threads.add(getChunkQueueTestThread(mslab, "testLABChunkQueue-" + i, kv));
} }
@ -214,7 +227,7 @@ public class TestMemStoreLAB {
+ " after mslab closed but actually: " + queueLength, queueLength == 0); + " after mslab closed but actually: " + queueLength, queueLength == 0);
} }
private Thread getChunkQueueTestThread(final HeapMemStoreLAB mslab, String threadName, private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName,
Cell cellToCopyInto) { Cell cellToCopyInto) {
Thread thread = new Thread() { Thread thread = new Thread() {
boolean stopped = false; boolean stopped = false;