HBASE-17338 Treat Cell data size under global memstore heap size only when that Cell can not be copied to MSLAB.

anoopsamjohn 2017-03-10 11:12:59 +05:30
parent c63a871176
commit ab5970773a
30 changed files with 289 additions and 379 deletions
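The change, in one line: the memstore now tracks two counters, the cells' data size and the total heap size they occupy, instead of data size plus a separate "heap overhead". A minimal sketch of the rule in the commit title, with hypothetical names (account(), pojoAndEntryOverhead, copiedToOffheapMslab are illustrations, not the committed code):

// Hypothetical sketch: dataSize always counts the cell's key/value bytes;
// heapSize additionally counts those bytes only when the cell could not be
// copied into an off heap MSLAB chunk and therefore still lives on the heap.
static void account(MemstoreSize memstoreSize, long cellDataLength,
    long pojoAndEntryOverhead, boolean copiedToOffheapMslab) {
  long heapSizeDelta = pojoAndEntryOverhead;
  if (!copiedToOffheapMslab) {
    heapSizeDelta += cellDataLength; // data stays on heap, so charge heapSize too
  }
  memstoreSize.incMemstoreSize(cellDataLength, heapSizeDelta);
}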

ByteBufferKeyValue.java

@@ -40,7 +40,7 @@ public class ByteBufferKeyValue extends ByteBufferCell implements ExtendedCell {
protected final int length;
private long seqId = 0;
- private static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
+ public static final int FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
+ (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_LONG;
public ByteBufferKeyValue(ByteBuffer buf, int offset, int length, long seqId) {
@@ -301,11 +301,6 @@ public class ByteBufferKeyValue extends ByteBufferCell implements ExtendedCell {
Bytes.SIZEOF_LONG);
}
- @Override
- public long heapOverhead() {
- return FIXED_OVERHEAD;
- }
@Override
public Cell deepClone() {
byte[] copy = new byte[this.length];
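For reference, FIXED_OVERHEAD (now public) sums the on-heap footprint of the ByteBufferKeyValue object itself: the object header, the reference to the backing ByteBuffer, the two int fields (offset, length) and the long seqId. On a typical 64-bit JVM (an assumption; ClassSize resolves the real values at runtime) that works out to roughly 16 + 8 + 2 * 4 + 8 = 40 bytes. Widening it to public presumably lets the now-centralized heap-size estimation reuse the constant, since cells no longer expose heapOverhead() themselves.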

CellUtil.java

@@ -621,15 +621,6 @@ public final class CellUtil {
}
}
- @Override
- public long heapOverhead() {
- long overhead = ((ExtendedCell) this.cell).heapOverhead() + HEAP_SIZE_OVERHEAD;
- if (this.tags != null) {
- overhead += ClassSize.ARRAY;
- }
- return overhead;
- }
@Override
public Cell deepClone() {
Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
@@ -811,15 +802,6 @@ public final class CellUtil {
}
}
- @Override
- public long heapOverhead() {
- long overhead = ((ExtendedCell) this.cell).heapOverhead() + HEAP_SIZE_OVERHEAD;
- if (this.tags != null) {
- overhead += ClassSize.ARRAY;
- }
- return overhead;
- }
@Override
public Cell deepClone() {
Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
@@ -963,15 +945,6 @@ public final class CellUtil {
}
}
- @Override
- public long heapOverhead() {
- long overhead = super.heapOverhead() + ClassSize.REFERENCE;
- if (this.value != null) {
- overhead += ClassSize.ARRAY;
- }
- return overhead;
- }
@Override
public Cell deepClone() {
Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
@@ -1038,15 +1011,6 @@ public final class CellUtil {
ValueAndTagRewriteCell.write(buf, offset, this.cell, this.value, this.tags);
}
- @Override
- public long heapOverhead() {
- long overhead = super.heapOverhead() + ClassSize.REFERENCE;
- if (this.value != null) {
- overhead += ClassSize.ARRAY;
- }
- return overhead;
- }
@Override
public Cell deepClone() {
Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();

ExtendedCell.java

@@ -68,11 +68,6 @@ public interface ExtendedCell extends Cell, SettableSequenceId, SettableTimestamp
*/
void write(ByteBuffer buf, int offset);
- /**
- * @return The heap size overhead associated with this Cell.
- */
- long heapOverhead();
/**
* Does a deep copy of the contents to a new memory area and returns it as a new cell.
* @return The deep cloned cell

IndividualBytesFieldCell.java

@@ -120,8 +120,7 @@ public class IndividualBytesFieldCell implements ExtendedCell {
getValueLength(), getTagsLength(), withTags);
}
- @Override
- public long heapOverhead() {
+ private long heapOverhead() {
return FIXED_OVERHEAD
+ ClassSize.ARRAY // row , can not be null
+ ((family == null) ? 0 : ClassSize.ARRAY) // family , can be null

KeyValue.java

@@ -2804,16 +2804,6 @@ public class KeyValue implements ExtendedCell {
// of Cell to be returned back over the RPC
throw new IllegalStateException("A reader should never return this type of a Cell");
}
- @Override
- public long heapOverhead() {
- return super.heapOverhead() + Bytes.SIZEOF_SHORT;
- }
}
- @Override
- public long heapOverhead() {
- return FIXED_OVERHEAD;
- }
@Override

SizeCachedKeyValue.java

@@ -62,9 +62,4 @@ public class SizeCachedKeyValue extends KeyValue {
public long heapSize() {
return super.heapSize() + FIXED_OVERHEAD;
}
- @Override
- public long heapOverhead() {
- return super.heapOverhead() + FIXED_OVERHEAD;
- }
}

BufferedDataBlockEncoder.java

@@ -469,11 +469,6 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
throw new UnsupportedOperationException();
}
- @Override
- public long heapOverhead() {
- return FIXED_OVERHEAD;
- }
@Override
public Cell deepClone() {
// This is not used in actual flow. Throwing UnsupportedOperationException
@@ -719,11 +714,6 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
throw new UnsupportedOperationException();
}
- @Override
- public long heapOverhead() {
- return FIXED_OVERHEAD;
- }
@Override
public Cell deepClone() {
// This is not used in actual flow. Throwing UnsupportedOperationException

AbstractMemStore.java

@@ -153,7 +153,7 @@ public abstract class AbstractMemStore implements MemStore {
@Override
public MemstoreSize getSnapshotSize() {
- return new MemstoreSize(this.snapshot.keySize(), this.snapshot.heapOverhead());
+ return new MemstoreSize(this.snapshot.keySize(), this.snapshot.heapSize());
}
@Override
@@ -281,10 +281,10 @@ public abstract class AbstractMemStore implements MemStore {
protected abstract long keySize();
/**
- * @return The total heap overhead of cells in this memstore. We will not consider cells in the
+ * @return The total heap size of cells in this memstore. We will not consider cells in the
* snapshot
*/
- protected abstract long heapOverhead();
+ protected abstract long heapSize();
protected CellComparator getComparator() {
return comparator;

CompactingMemStore.java

@@ -123,9 +123,9 @@ public class CompactingMemStore extends AbstractMemStore {
@Override
public MemstoreSize size() {
MemstoreSize memstoreSize = new MemstoreSize();
- memstoreSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+ memstoreSize.incMemstoreSize(this.active.keySize(), this.active.heapSize());
for (Segment item : pipeline.getSegments()) {
- memstoreSize.incMemstoreSize(item.keySize(), item.heapOverhead());
+ memstoreSize.incMemstoreSize(item.keySize(), item.heapSize());
}
return memstoreSize;
}
@@ -196,13 +196,13 @@ public class CompactingMemStore extends AbstractMemStore {
// if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed
if (compositeSnapshot) {
snapshotSize = pipeline.getPipelineSize();
- snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+ snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapSize());
} else {
snapshotSize = pipeline.getTailSize();
}
}
return snapshotSize.getDataSize() > 0 ? snapshotSize
- : new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
+ : new MemstoreSize(this.active.keySize(), this.active.heapSize());
}
@Override
@@ -216,11 +216,11 @@ public class CompactingMemStore extends AbstractMemStore {
}
@Override
- protected long heapOverhead() {
+ protected long heapSize() {
// Need to consider heapOverhead of all segments in pipeline and active
- long h = this.active.heapOverhead();
+ long h = this.active.heapSize();
for (Segment segment : this.pipeline.getSegments()) {
- h += segment.heapOverhead();
+ h += segment.heapSize();
}
return h;
}

CompactionPipeline.java

@@ -141,24 +141,24 @@ public class CompactionPipeline {
long newDataSize = 0;
if(segment != null) newDataSize = segment.keySize();
long dataSizeDelta = suffixDataSize - newDataSize;
- long suffixHeapOverhead = getSegmentsHeapOverhead(suffix);
- long newHeapOverhead = 0;
- if(segment != null) newHeapOverhead = segment.heapOverhead();
- long heapOverheadDelta = suffixHeapOverhead - newHeapOverhead;
- region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapOverheadDelta));
+ long suffixHeapSize = getSegmentsHeapSize(suffix);
+ long newHeapSize = 0;
+ if(segment != null) newHeapSize = segment.heapSize();
+ long heapSizeDelta = suffixHeapSize - newHeapSize;
+ region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapSizeDelta));
if (LOG.isDebugEnabled()) {
LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: "
- + newDataSize + ". Suffix heap overhead: " + suffixHeapOverhead
- + " new segment heap overhead: " + newHeapOverhead);
+ + newDataSize + ". Suffix heap size: " + suffixHeapSize
+ + " new segment heap size: " + newHeapSize);
}
}
return true;
}
- private static long getSegmentsHeapOverhead(List<? extends Segment> list) {
+ private static long getSegmentsHeapSize(List<? extends Segment> list) {
long res = 0;
for (Segment segment : list) {
- res += segment.heapOverhead();
+ res += segment.heapSize();
}
return res;
}
@@ -234,20 +234,20 @@ public class CompactionPipeline {
public MemstoreSize getTailSize() {
LinkedList<? extends Segment> localCopy = readOnlyCopy;
- if (localCopy.isEmpty()) return MemstoreSize.EMPTY_SIZE;
- return new MemstoreSize(localCopy.peekLast().keySize(), localCopy.peekLast().heapOverhead());
+ if (localCopy.isEmpty()) return new MemstoreSize(true);
+ return new MemstoreSize(localCopy.peekLast().keySize(), localCopy.peekLast().heapSize());
}
public MemstoreSize getPipelineSize() {
long keySize = 0;
- long heapOverhead = 0;
+ long heapSize = 0;
LinkedList<? extends Segment> localCopy = readOnlyCopy;
- if (localCopy.isEmpty()) return MemstoreSize.EMPTY_SIZE;
+ if (localCopy.isEmpty()) return new MemstoreSize(true);
for (Segment segment : localCopy) {
keySize += segment.keySize();
- heapOverhead += segment.heapOverhead();
+ heapSize += segment.heapSize();
}
- return new MemstoreSize(keySize, heapOverhead);
+ return new MemstoreSize(keySize, heapSize);
}
private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment,
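A worked example of swapSuffix()'s delta bookkeeping above, with made-up numbers:

// Hypothetical figures: the replaced suffix held 300 data bytes / 520 heap bytes,
// the compacted replacement segment holds 200 / 360; the region counters must
// shrink by the difference, hence the negative MemstoreSize.
long dataSizeDelta = 300 - 200; // suffixDataSize - newDataSize = 100
long heapSizeDelta = 520 - 360; // suffixHeapSize - newHeapSize = 160
region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapSizeDelta));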

CompositeImmutableSegment.java

@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
- import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan;
@@ -43,7 +42,6 @@ import java.util.SortedSet;
public class CompositeImmutableSegment extends ImmutableSegment {
private final List<ImmutableSegment> segments;
- private final CellComparator comparator;
// CompositeImmutableSegment is used for snapshots and snapshot should
// support getTimeRangeTracker() interface.
// Thus we hold a constant TRT build in the construction time from TRT of the given segments.
@@ -53,7 +51,6 @@ public class CompositeImmutableSegment extends ImmutableSegment {
public CompositeImmutableSegment(CellComparator comparator, List<ImmutableSegment> segments) {
super(comparator);
- this.comparator = comparator;
this.segments = segments;
this.timeRangeTracker = new TimeRangeTracker();
for (ImmutableSegment s : segments) {
@@ -64,10 +61,12 @@ public class CompositeImmutableSegment extends ImmutableSegment {
}
@VisibleForTesting
+ @Override
public List<Segment> getAllSegments() {
return new LinkedList<>(segments);
}
+ @Override
public int getNumOfSegments() {
return segments.size();
}
@@ -77,6 +76,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
* general segment scanner.
* @return a special scanner for the MemStoreSnapshot object
*/
+ @Override
public KeyValueScanner getSnapshotScanner() {
return getScanner(Long.MAX_VALUE, Long.MAX_VALUE);
}
@@ -84,6 +84,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* @return whether the segment has any cells
*/
+ @Override
public boolean isEmpty() {
for (ImmutableSegment s : segments) {
if (!s.isEmpty()) return false;
@@ -94,6 +95,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* @return number of cells in segment
*/
+ @Override
public int getCellsCount() {
int result = 0;
for (ImmutableSegment s : segments) {
@@ -105,6 +107,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* @return the first cell in the segment that has equal or greater key than the given cell
*/
+ @Override
public Cell getFirstAfter(Cell cell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -112,6 +115,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* Closing a segment before it is being discarded
*/
+ @Override
public void close() {
for (ImmutableSegment s : segments) {
s.close();
@@ -123,14 +127,17 @@ public class CompositeImmutableSegment extends ImmutableSegment {
* otherwise the given cell is returned
* @return either the given cell or its clone
*/
+ @Override
public Cell maybeCloneWithAllocator(Cell cell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public boolean shouldSeek(Scan scan, long oldestUnexpiredTS){
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public long getMinTimestamp(){
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -139,6 +146,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
* Creates the scanner for the given read point
* @return a scanner for the given read point
*/
+ @Override
public KeyValueScanner getScanner(long readPoint) {
// Long.MAX_VALUE is DEFAULT_SCANNER_ORDER
return getScanner(readPoint,Long.MAX_VALUE);
@@ -148,6 +156,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
* Creates the scanner for the given read point, and a specific order in a list
* @return a scanner for the given read point
*/
+ @Override
public KeyValueScanner getScanner(long readPoint, long order) {
KeyValueScanner resultScanner;
List<KeyValueScanner> list = new ArrayList<>(segments.size());
@@ -164,6 +173,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
return resultScanner;
}
+ @Override
public boolean isTagsPresent() {
for (ImmutableSegment s : segments) {
if (s.isTagsPresent()) return true;
@@ -171,10 +181,12 @@ public class CompositeImmutableSegment extends ImmutableSegment {
return false;
}
+ @Override
public void incScannerCount() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public void decScannerCount() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -184,7 +196,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
* immutable CellSet after its creation in immutable segment constructor
* @return this object
*/
+ @Override
protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -192,17 +204,19 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* @return Sum of all cell sizes.
*/
+ @Override
public long keySize() {
return this.keySize;
}
/**
- * @return The heap overhead of this segment.
+ * @return The heap size of this segment.
*/
- public long heapOverhead() {
+ @Override
+ public long heapSize() {
long result = 0;
for (ImmutableSegment s : segments) {
- result += s.heapOverhead();
+ result += s.heapSize();
}
return result;
}
@@ -210,39 +224,43 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* Updates the heap size counter of the segment by the given delta
*/
+ @Override
protected void incSize(long delta, long heapOverhead) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
- protected void incHeapOverheadSize(long delta) {
- throw new IllegalStateException("Not supported by CompositeImmutableScanner");
- }
+ @Override
public long getMinSequenceId() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public TimeRangeTracker getTimeRangeTracker() {
return this.timeRangeTracker;
}
//*** Methods for SegmentsScanner
+ @Override
public Cell last() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public Iterator<Cell> iterator() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public int compare(Cell left, Cell right) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
public int compareRows(Cell left, Cell right) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
@@ -250,36 +268,28 @@ public class CompositeImmutableSegment extends ImmutableSegment {
/**
* @return a set of all cells in the segment
*/
+ @Override
protected CellSet getCellSet() {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
- /**
- * Returns the Cell comparator used by this segment
- * @return the Cell comparator used by this segment
- */
- protected CellComparator getComparator() {
- return comparator;
- }
+ @Override
protected void internalAdd(Cell cell, boolean mslabUsed, MemstoreSize memstoreSize) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
+ @Override
protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
MemstoreSize memstoreSize) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
- protected long heapOverheadChange(Cell cell, boolean succ) {
- throw new IllegalStateException("Not supported by CompositeImmutableScanner");
- }
/**
* Returns a subset of the segment cell set, which starts with the given cell
* @param firstCell a cell in the segment
* @return a subset of the segment cell set, which starts with the given cell
*/
+ @Override
protected SortedSet<Cell> tailSet(Cell firstCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}

DefaultMemStore.java

@@ -111,7 +111,7 @@ public class DefaultMemStore extends AbstractMemStore {
public MemstoreSize getFlushableSize() {
MemstoreSize snapshotSize = getSnapshotSize();
return snapshotSize.getDataSize() > 0 ? snapshotSize
- : new MemstoreSize(keySize(), heapOverhead());
+ : new MemstoreSize(keySize(), heapSize());
}
@Override
@@ -120,8 +120,8 @@ public class DefaultMemStore extends AbstractMemStore {
}
@Override
- protected long heapOverhead() {
- return this.active.heapOverhead();
+ protected long heapSize() {
+ return this.active.heapSize();
}
@Override
@@ -160,7 +160,7 @@ public class DefaultMemStore extends AbstractMemStore {
@Override
public MemstoreSize size() {
- return new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
+ return new MemstoreSize(this.active.keySize(), this.active.heapSize());
}
/**
@@ -205,12 +205,12 @@ public class DefaultMemStore extends AbstractMemStore {
memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSize);
}
LOG.info("memstore1 estimated size="
- + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+ + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
for (int i = 0; i < count; i++) {
memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSize);
}
LOG.info("memstore1 estimated size (2nd loading of same data)="
- + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+ + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
// Make a variably sized memstore.
DefaultMemStore memstore2 = new DefaultMemStore();
memstoreSize = new MemstoreSize();
@@ -218,7 +218,7 @@ public class DefaultMemStore extends AbstractMemStore {
memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memstoreSize);
}
LOG.info("memstore2 estimated size="
- + (memstoreSize.getDataSize() + memstoreSize.getHeapOverhead()));
+ + (memstoreSize.getDataSize() + memstoreSize.getHeapSize()));
final int seconds = 30;
LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
LOG.info("Exiting.");

HRegion.java

@@ -1689,7 +1689,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
this.closed.set(true);
if (!canFlush) {
- this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapOverhead()));
+ this.decrMemstoreSize(new MemstoreSize(memstoreDataSize.get(), getMemstoreHeapSize()));
} else if (memstoreDataSize.get() != 0) {
LOG.error("Memstore size is " + memstoreDataSize.get());
}
@@ -1713,12 +1713,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
}
}
- private long getMemstoreHeapOverhead() {
- long overhead = 0;
+ private long getMemstoreHeapSize() {
+ long size = 0;
for (Store s : this.stores.values()) {
- overhead += s.getSizeOfMemStore().getHeapOverhead();
+ size += s.getSizeOfMemStore().getHeapSize();
}
- return overhead;
+ return size;
}
@Override

HStore.java

@@ -380,7 +380,7 @@ public class HStore implements Store {
@Deprecated
public long getFlushableSize() {
MemstoreSize size = getSizeToFlush();
- return size.getDataSize() + size.getHeapOverhead();
+ return size.getHeapSize();
}
@Override
@@ -392,7 +392,7 @@ public class HStore implements Store {
@Deprecated
public long getSnapshotSize() {
MemstoreSize size = getSizeOfSnapshot();
- return size.getDataSize() + size.getHeapOverhead();
+ return size.getHeapSize();
}
@Override
@@ -2067,7 +2067,7 @@ public class HStore implements Store {
@Deprecated
public long getMemStoreSize() {
MemstoreSize size = getSizeOfMemStore();
- return size.getDataSize() + size.getHeapOverhead();
+ return size.getHeapSize();
}
@Override
@@ -2298,7 +2298,7 @@ public class HStore implements Store {
@Override
public long heapSize() {
MemstoreSize memstoreSize = this.memstore.size();
- return DEEP_OVERHEAD + memstoreSize.getDataSize() + memstoreSize.getHeapOverhead();
+ return DEEP_OVERHEAD + memstoreSize.getHeapSize();
}
@Override
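Note the subtlety in these deprecated getters: they used to return getDataSize() + getHeapOverhead(). Because heapSize now already folds in the data bytes of on-heap cells, returning getHeapSize() alone keeps the old meaning for an on-heap memstore and, for an off-heap MSLAB, correctly stops counting off-heap data bytes as heap.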

HeapMemoryManager.java

@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
import java.lang.management.ManagementFactory;
- import java.lang.management.MemoryType;
- import java.lang.management.MemoryUsage;
import java.util.ArrayList;
import java.util.List;
@@ -326,7 +324,7 @@ public class HeapMemoryManager {
// TODO : add support for offheap metrics
tunerContext.setCurBlockCacheUsed((float) blockCache.getCurrentSize() / maxHeapSize);
metricsHeapMemoryManager.setCurBlockCacheSizeGauge(blockCache.getCurrentSize());
- long globalMemstoreHeapSize = regionServerAccounting.getGlobalMemstoreSize();
+ long globalMemstoreHeapSize = regionServerAccounting.getGlobalMemstoreHeapSize();
tunerContext.setCurMemStoreUsed((float) globalMemstoreHeapSize / maxHeapSize);
metricsHeapMemoryManager.setCurMemStoreSizeGauge(globalMemstoreHeapSize);
tunerContext.setCurBlockCacheSize(blockCachePercent);

ImmutableSegment.java

@@ -21,8 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
- import org.apache.hadoop.hbase.ExtendedCell;
- import org.apache.hadoop.hbase.KeyValue;
+ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.client.Scan;
@@ -119,14 +118,13 @@ public class ImmutableSegment extends Segment {
super(new CellSet(comparator), // initiailize the CellSet with empty CellSet
comparator, memStoreLAB);
type = Type.SKIPLIST_MAP_BASED;
- MemstoreSize memstoreSize = new MemstoreSize();
while (iterator.hasNext()) {
Cell c = iterator.next();
// The scanner is doing all the elimination logic
// now we just copy it to the new segment
Cell newKV = maybeCloneWithAllocator(c);
boolean usedMSLAB = (newKV != c);
- internalAdd(newKV, usedMSLAB, memstoreSize);
+ internalAdd(newKV, usedMSLAB, null);
}
this.timeRange = this.timeRangeTracker == null ? null : this.timeRangeTracker.toTimeRange();
}
@@ -226,17 +224,13 @@ public class ImmutableSegment extends Segment {
}
@Override
- protected long heapOverheadChange(Cell cell, boolean succ) {
+ protected long heapSizeChange(Cell cell, boolean succ) {
if (succ) {
switch (this.type) {
case SKIPLIST_MAP_BASED:
- return super.heapOverheadChange(cell, succ);
+ return super.heapSizeChange(cell, succ);
case ARRAY_MAP_BASED:
- if (cell instanceof ExtendedCell) {
- return ClassSize
- .align(ClassSize.CELL_ARRAY_MAP_ENTRY + ((ExtendedCell) cell).heapOverhead());
- }
- return ClassSize.align(ClassSize.CELL_ARRAY_MAP_ENTRY + KeyValue.FIXED_OVERHEAD);
+ return ClassSize.align(ClassSize.CELL_ARRAY_MAP_ENTRY + CellUtil.estimatedHeapSizeOf(cell));
}
}
return 0;

MemStoreFlusher.java

@@ -186,11 +186,11 @@ class MemStoreFlusher implements FlushRequester {
(bestRegionReplica.getMemstoreSize()
> secondaryMultiplier * regionToFlush.getMemstoreSize()))) {
LOG.info("Refreshing storefiles of region " + bestRegionReplica
- + " due to global heap pressure. Total memstore size="
+ + " due to global heap pressure. Total memstore datasize="
+ StringUtils
.humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
- + " memstore heap overhead=" + StringUtils.humanReadableInt(
- server.getRegionServerAccounting().getGlobalMemstoreHeapOverhead()));
+ + " memstore heap size=" + StringUtils.humanReadableInt(
+ server.getRegionServerAccounting().getGlobalMemstoreHeapSize()));
flushedOne = refreshStoreFilesAndReclaimMemory(bestRegionReplica);
if (!flushedOne) {
LOG.info("Excluding secondary region " + bestRegionReplica +
@@ -587,14 +587,20 @@ class MemStoreFlusher implements FlushRequester {
if (!blocked) {
startTime = EnvironmentEdgeManager.currentTime();
if (!server.getRegionServerAccounting().isOffheap()) {
- logMsg("the global memstore size", "global memstore heap overhead");
+ logMsg("global memstore heapsize",
+ server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
+ server.getRegionServerAccounting().getGlobalMemstoreLimit());
} else {
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK:
- logMsg("the global offheap memstore size", "global memstore heap overhead");
+ logMsg("the global offheap memstore datasize",
+ server.getRegionServerAccounting().getGlobalMemstoreDataSize(),
+ server.getRegionServerAccounting().getGlobalMemstoreLimit());
break;
case ABOVE_ONHEAP_HIGHER_MARK:
- logMsg("global memstore heap overhead", "");
+ logMsg("global memstore heapsize",
+ server.getRegionServerAccounting().getGlobalMemstoreHeapSize(),
+ server.getRegionServerAccounting().getGlobalOnHeapMemstoreLimit());
break;
default:
break;
@@ -635,16 +641,10 @@ class MemStoreFlusher implements FlushRequester {
scope.close();
}
- private void logMsg(String string1, String string2) {
+ private void logMsg(String string1, long val, long max) {
LOG.info("Blocking updates on " + server.toString() + ": " + string1 + " "
- + TraditionalBinaryPrefix
- .long2String(server.getRegionServerAccounting().getGlobalMemstoreDataSize(), "", 1)
- + " + " + string2 + " "
- + TraditionalBinaryPrefix
- .long2String(server.getRegionServerAccounting().getGlobalMemstoreHeapOverhead(), "", 1)
- + " is >= than blocking " + TraditionalBinaryPrefix.long2String(
- server.getRegionServerAccounting().getGlobalMemstoreLimit(), "", 1)
- + " size");
+ + TraditionalBinaryPrefix.long2String(val, "", 1) + " is >= than blocking "
+ + TraditionalBinaryPrefix.long2String(max, "", 1) + " size");
}
@Override

MemStoreSnapshot.java

@@ -29,7 +29,7 @@ public class MemStoreSnapshot {
private final long id;
private final int cellsCount;
private final long dataSize;
- private final long heapOverhead;
+ private final long heapSize;
private final TimeRangeTracker timeRangeTracker;
private final KeyValueScanner scanner;
private final boolean tagsPresent;
@@ -38,7 +38,7 @@ public class MemStoreSnapshot {
this.id = id;
this.cellsCount = snapshot.getCellsCount();
this.dataSize = snapshot.keySize();
- this.heapOverhead = snapshot.heapOverhead();
+ this.heapSize = snapshot.heapSize();
this.timeRangeTracker = snapshot.getTimeRangeTracker();
this.scanner = snapshot.getSnapshotScanner();
this.tagsPresent = snapshot.isTagsPresent();
@@ -65,8 +65,8 @@ public class MemStoreSnapshot {
return dataSize;
}
- public long getHeapOverhead() {
- return this.heapOverhead;
+ public long getHeapSize() {
+ return this.heapSize;
}
/**

MemstoreSize.java

@@ -20,26 +20,29 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
- * Wraps the data size part and heap overhead of the memstore.
+ * Wraps the data size part and total heap space occupied by the memstore.
*/
@InterfaceAudience.Private
public class MemstoreSize {
+ // 'dataSize' tracks the Cell's data bytes size alone (Key bytes, value bytes). A cell's data can
+ // be in on heap or off heap area depending on the MSLAB and its configuration to be using on heap
+ // or off heap LABs
private long dataSize;
- private long heapOverhead;
+ // 'heapSize' tracks all Cell's heap size occupancy. This will include Cell POJO heap overhead.
+ // When Cells in on heap area, this will include the cells data size as well.
+ private long heapSize;
final private boolean isEmpty;
- static final MemstoreSize EMPTY_SIZE = new MemstoreSize(true);
public MemstoreSize() {
dataSize = 0;
- heapOverhead = 0;
+ heapSize = 0;
isEmpty = false;
}
public MemstoreSize(boolean isEmpty) {
dataSize = 0;
- heapOverhead = 0;
+ heapSize = 0;
this.isEmpty = isEmpty;
}
@@ -47,40 +50,38 @@ public class MemstoreSize {
return isEmpty;
}
- public MemstoreSize(long dataSize, long heapOverhead) {
+ public MemstoreSize(long dataSize, long heapSize) {
this.dataSize = dataSize;
- this.heapOverhead = heapOverhead;
+ this.heapSize = heapSize;
this.isEmpty = false;
}
- public void incMemstoreSize(long dataSize, long heapOverhead) {
- this.dataSize += dataSize;
- this.heapOverhead += heapOverhead;
+ public void incMemstoreSize(long dataSizeDelta, long heapSizeDelta) {
+ this.dataSize += dataSizeDelta;
+ this.heapSize += heapSizeDelta;
}
- public void incMemstoreSize(MemstoreSize size) {
- this.dataSize += size.dataSize;
- this.heapOverhead += size.heapOverhead;
+ public void incMemstoreSize(MemstoreSize delta) {
+ this.dataSize += delta.dataSize;
+ this.heapSize += delta.heapSize;
}
- public void decMemstoreSize(long dataSize, long heapOverhead) {
- this.dataSize -= dataSize;
- this.heapOverhead -= heapOverhead;
+ public void decMemstoreSize(long dataSizeDelta, long heapSizeDelta) {
+ this.dataSize -= dataSizeDelta;
+ this.heapSize -= heapSizeDelta;
}
- public void decMemstoreSize(MemstoreSize size) {
- this.dataSize -= size.dataSize;
- this.heapOverhead -= size.heapOverhead;
+ public void decMemstoreSize(MemstoreSize delta) {
+ this.dataSize -= delta.dataSize;
+ this.heapSize -= delta.heapSize;
}
public long getDataSize() {
return isEmpty ? 0 : dataSize;
}
- public long getHeapOverhead() {
- return isEmpty ? 0 : heapOverhead;
+ public long getHeapSize() {
+ return isEmpty ? 0 : heapSize;
}
@Override
@@ -89,18 +90,18 @@ public class MemstoreSize {
return false;
}
MemstoreSize other = (MemstoreSize) obj;
- return getDataSize() == other.dataSize && getHeapOverhead() == other.heapOverhead;
+ return this.dataSize == other.dataSize && this.heapSize == other.heapSize;
}
@Override
public int hashCode() {
long h = 13 * this.dataSize;
- h = h + 14 * this.heapOverhead;
+ h = h + 14 * this.heapSize;
return (int) h;
}
@Override
public String toString() {
- return "dataSize=" + this.dataSize + " , heapOverhead=" + this.heapOverhead;
+ return "dataSize=" + this.dataSize + " , heapSize=" + this.heapSize;
}
}
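A minimal usage sketch of the reworked class; the 100-byte cell and 120-byte overhead figures are made up:

// Hypothetical: a 100-byte cell copied into an off heap MSLAB chunk; only the
// Cell POJO plus map entry overhead (say 120 bytes) lands on the Java heap.
MemstoreSize size = new MemstoreSize();
size.incMemstoreSize(100, 120);
// The same cell kept on heap (no MSLAB copy): heapSize includes the data too.
size.incMemstoreSize(100, 120 + 100);
System.out.println(size); // dataSize=200 , heapSize=340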

MutableSegment.java

@@ -85,10 +85,10 @@ public class MutableSegment extends Segment {
// decreased there as 0 only. Just keeping it as existing code now. We need to know the
// removed cell is from MSLAB or not. Will do once HBASE-16438 is in
int cellLen = getCellLength(cur);
- long heapOverheadDelta = heapOverheadChange(cur, true);
- this.incSize(-cellLen, -heapOverheadDelta);
+ long heapSize = heapSizeChange(cur, true);
+ this.incSize(-cellLen, -heapSize);
if (memstoreSize != null) {
- memstoreSize.decMemstoreSize(cellLen, heapOverheadDelta);
+ memstoreSize.decMemstoreSize(cellLen, heapSize);
}
it.remove();
} else {

RegionServerAccounting.java

@@ -39,16 +39,16 @@ public class RegionServerAccounting {
// memstore data size
private final AtomicLong globalMemstoreDataSize = new AtomicLong(0);
- // memstore heap over head size
- private final AtomicLong globalMemstoreHeapOverhead = new AtomicLong(0);
+ // memstore heap size. When off heap MSLAB in place, this will be only heap overhead of the Cell
+ // POJOs and entry overhead of them onto memstore. When on heap MSLAB, this will be include heap
+ // overhead as well as the cell data size. Ya cell data is in on heap area only then.
+ private final AtomicLong globalMemstoreHeapSize = new AtomicLong(0);
// Store the edits size during replaying WAL. Use this to roll back the
// global memstore size once a region opening failed.
private final ConcurrentMap<byte[], MemstoreSize> replayEditsPerRegion =
new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
- private final Configuration conf;
private long globalMemStoreLimit;
private final float globalMemStoreLimitLowMarkPercent;
private long globalMemStoreLimitLowMark;
@@ -57,7 +57,6 @@ public class RegionServerAccounting {
private long globalOnHeapMemstoreLimitLowMark;
public RegionServerAccounting(Configuration conf) {
- this.conf = conf;
Pair<Long, MemoryType> globalMemstoreSizePair = MemorySizeUtil.getGlobalMemstoreSize(conf);
this.globalMemStoreLimit = globalMemstoreSizePair.getFirst();
this.memType = globalMemstoreSizePair.getSecond();
@@ -79,16 +78,16 @@ public class RegionServerAccounting {
(long) (this.globalOnHeapMemstoreLimit * this.globalMemStoreLimitLowMarkPercent);
}
- public long getGlobalMemstoreLimit() {
+ long getGlobalMemstoreLimit() {
return this.globalMemStoreLimit;
}
- public long getOnheapGlobalMemstoreLimit() {
+ long getGlobalOnHeapMemstoreLimit() {
return this.globalOnHeapMemstoreLimit;
}
// Called by the tuners.
- public void setGlobalMemstoreLimits(long newGlobalMemstoreLimit) {
+ void setGlobalMemstoreLimits(long newGlobalMemstoreLimit) {
if (this.memType == MemoryType.HEAP) {
this.globalMemStoreLimit = newGlobalMemstoreLimit;
this.globalMemStoreLimitLowMark =
@@ -100,15 +99,15 @@ public class RegionServerAccounting {
}
}
- public boolean isOffheap() {
+ boolean isOffheap() {
return this.memType == MemoryType.NON_HEAP;
}
- public long getGlobalMemstoreLimitLowMark() {
+ long getGlobalMemstoreLimitLowMark() {
return this.globalMemStoreLimitLowMark;
}
- public float getGlobalMemstoreLimitLowMarkPercent() {
+ float getGlobalMemstoreLimitLowMarkPercent() {
return this.globalMemStoreLimitLowMarkPercent;
}
@@ -118,24 +117,12 @@ public class RegionServerAccounting {
public long getGlobalMemstoreDataSize() {
return globalMemstoreDataSize.get();
}
- /**
- * @return the global memstore heap overhead size in the RegionServer
- */
- public long getGlobalMemstoreHeapOverhead() {
- return this.globalMemstoreHeapOverhead.get();
- }
/**
- * @return the global memstore data size and heap overhead size for an onheap memstore
- * whereas return the heap overhead size for an offheap memstore
+ * @return the global memstore heap size in the RegionServer
*/
- public long getGlobalMemstoreSize() {
- if (isOffheap()) {
- // get only the heap overhead for offheap memstore
- return getGlobalMemstoreHeapOverhead();
- } else {
- return getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead();
- }
+ public long getGlobalMemstoreHeapSize() {
+ return this.globalMemstoreHeapSize.get();
}
/**
@@ -144,12 +131,12 @@ public class RegionServerAccounting {
*/
public void incGlobalMemstoreSize(MemstoreSize memStoreSize) {
globalMemstoreDataSize.addAndGet(memStoreSize.getDataSize());
- globalMemstoreHeapOverhead.addAndGet(memStoreSize.getHeapOverhead());
+ globalMemstoreHeapSize.addAndGet(memStoreSize.getHeapSize());
}
public void decGlobalMemstoreSize(MemstoreSize memStoreSize) {
globalMemstoreDataSize.addAndGet(-memStoreSize.getDataSize());
- globalMemstoreHeapOverhead.addAndGet(-memStoreSize.getHeapOverhead());
+ globalMemstoreHeapSize.addAndGet(-memStoreSize.getHeapSize());
}
/**
@@ -160,7 +147,7 @@ public class RegionServerAccounting {
// for onheap memstore we check if the global memstore size and the
// global heap overhead is greater than the global memstore limit
if (memType == MemoryType.HEAP) {
- if (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead() >= globalMemStoreLimit) {
+ if (getGlobalMemstoreHeapSize() >= globalMemStoreLimit) {
return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
}
} else {
@@ -175,7 +162,7 @@ public class RegionServerAccounting {
// Indicates that global memstore size is above the configured
// 'hbase.regionserver.offheap.global.memstore.size'
return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
- } else if (getGlobalMemstoreHeapOverhead() >= this.globalOnHeapMemstoreLimit) {
+ } else if (getGlobalMemstoreHeapSize() >= this.globalOnHeapMemstoreLimit) {
// Indicates that the offheap memstore's heap overhead is greater than the
// configured 'hbase.regionserver.global.memstore.size'.
return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
@@ -191,7 +178,7 @@ public class RegionServerAccounting {
// for onheap memstore we check if the global memstore size and the
// global heap overhead is greater than the global memstore lower mark limit
if (memType == MemoryType.HEAP) {
- if (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead() >= globalMemStoreLimitLowMark) {
+ if (getGlobalMemstoreHeapSize() >= globalMemStoreLimitLowMark) {
return FlushType.ABOVE_ONHEAP_LOWER_MARK;
}
} else {
@@ -199,7 +186,7 @@ public class RegionServerAccounting {
// Indicates that the offheap memstore's data size is greater than the global memstore
// lower limit
return FlushType.ABOVE_OFFHEAP_LOWER_MARK;
- } else if (getGlobalMemstoreHeapOverhead() >= globalOnHeapMemstoreLimitLowMark) {
+ } else if (getGlobalMemstoreHeapSize() >= globalOnHeapMemstoreLimitLowMark) {
// Indicates that the offheap memstore's heap overhead is greater than the global memstore
// onheap lower limit
return FlushType.ABOVE_ONHEAP_LOWER_MARK;
@@ -215,11 +202,10 @@ public class RegionServerAccounting {
*/
public double getFlushPressure() {
if (memType == MemoryType.HEAP) {
- return (getGlobalMemstoreDataSize() + getGlobalMemstoreHeapOverhead()) * 1.0
- / globalMemStoreLimitLowMark;
+ return (getGlobalMemstoreHeapSize()) * 1.0 / globalMemStoreLimitLowMark;
} else {
return Math.max(getGlobalMemstoreDataSize() * 1.0 / globalMemStoreLimitLowMark,
- getGlobalMemstoreHeapOverhead() * 1.0 / globalOnHeapMemstoreLimitLowMark);
+ getGlobalMemstoreHeapSize() * 1.0 / globalOnHeapMemstoreLimitLowMark);
}
}
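A condensed restatement (a sketch, not the committed code) of the high-water-mark checks above: on-heap mode compares the single heap size counter against the global limit, while off-heap mode watches data size against the off-heap limit and heap size against the on-heap limit:

// Sketch of isAboveHighWaterMark()'s decision after this change.
static FlushType check(boolean offheap, long dataSize, long heapSize,
    long globalLimit, long onHeapLimit) {
  if (!offheap) { // on heap memstore: heapSize already includes cell data
    return heapSize >= globalLimit ? FlushType.ABOVE_ONHEAP_HIGHER_MARK : FlushType.NORMAL;
  }
  if (dataSize >= globalLimit) { // off heap data over its configured limit
    return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
  }
  if (heapSize >= onHeapLimit) { // heap overhead over the on-heap limit
    return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
  }
  return FlushType.NORMAL;
}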

Segment.java

@@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
- import org.apache.hadoop.hbase.ExtendedCell;
+ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -62,7 +62,7 @@ public abstract class Segment {
// Sum of sizes of all Cells added to this Segment. Cell's heapSize is considered. This is not
// including the heap overhead of this class.
protected final AtomicLong dataSize;
- protected final AtomicLong heapOverhead;
+ protected final AtomicLong heapSize;
protected final TimeRangeTracker timeRangeTracker;
protected volatile boolean tagsPresent;
@@ -71,7 +71,7 @@ public abstract class Segment {
protected Segment(CellComparator comparator) {
this.comparator = comparator;
this.dataSize = new AtomicLong(0);
- this.heapOverhead = new AtomicLong(0);
+ this.heapSize = new AtomicLong(0);
this.timeRangeTracker = new TimeRangeTracker();
}
@@ -82,7 +82,7 @@ public abstract class Segment {
this.minSequenceId = Long.MAX_VALUE;
this.memStoreLAB = memStoreLAB;
this.dataSize = new AtomicLong(0);
- this.heapOverhead = new AtomicLong(0);
+ this.heapSize = new AtomicLong(0);
this.tagsPresent = false;
this.timeRangeTracker = new TimeRangeTracker();
}
@@ -93,7 +93,7 @@ public abstract class Segment {
this.minSequenceId = segment.getMinSequenceId();
this.memStoreLAB = segment.getMemStoreLAB();
this.dataSize = new AtomicLong(segment.keySize());
- this.heapOverhead = new AtomicLong(segment.heapOverhead.get());
+ this.heapSize = new AtomicLong(segment.heapSize.get());
this.tagsPresent = segment.isTagsPresent();
this.timeRangeTracker = segment.getTimeRangeTracker();
}
@@ -217,22 +217,19 @@ public abstract class Segment {
}
/**
- * @return The heap overhead of this segment.
+ * @return The heap size of this segment.
*/
- public long heapOverhead() {
- return this.heapOverhead.get();
+ public long heapSize() {
+ return this.heapSize.get();
}
/**
- * Updates the heap size counter of the segment by the given delta
+ * Updates the size counters of the segment by the given delta
*/
//TODO
protected void incSize(long delta, long heapOverhead) {
this.dataSize.addAndGet(delta);
- this.heapOverhead.addAndGet(heapOverhead);
- }
- protected void incHeapOverheadSize(long delta) {
- this.heapOverhead.addAndGet(delta);
+ this.heapSize.addAndGet(heapOverhead);
}
public long getMinSequenceId() {
@@ -293,10 +290,10 @@ public abstract class Segment {
if (succ || mslabUsed) {
cellSize = getCellLength(cellToAdd);
}
- long overhead = heapOverheadChange(cellToAdd, succ);
- incSize(cellSize, overhead);
+ long heapSize = heapSizeChange(cellToAdd, succ);
+ incSize(cellSize, heapSize);
if (memstoreSize != null) {
- memstoreSize.incMemstoreSize(cellSize, overhead);
+ memstoreSize.incMemstoreSize(cellSize, heapSize);
}
getTimeRangeTracker().includeTimestamp(cellToAdd);
minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId());
@@ -309,15 +306,14 @@ public abstract class Segment {
}
}
- protected long heapOverheadChange(Cell cell, boolean succ) {
+ /**
+ * @return The increase in heap size because of this cell addition. This includes this cell POJO's
+ * heap size itself and additional overhead because of addition on to CSLM.
+ */
+ protected long heapSizeChange(Cell cell, boolean succ) {
if (succ) {
- if (cell instanceof ExtendedCell) {
- return ClassSize
- .align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ((ExtendedCell) cell).heapOverhead());
- }
- // All cells in server side will be of type ExtendedCell. If not just go with estimation on
- // the heap overhead considering it is KeyValue.
- return ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD);
+ return ClassSize
+ .align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + CellUtil.estimatedHeapSizeOf(cell));
}
return 0;
}
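So the per-cell heap charge is now a single aligned sum: the skip-list entry overhead plus whatever CellUtil.estimatedHeapSizeOf() reports for the cell, which for an on-heap cell includes its data bytes. That is exactly the commit title's rule: a cell's data counts toward heap size precisely when the data lives on the heap.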
@@ -350,9 +346,9 @@ public abstract class Segment {
public String toString() {
String res = "Store segment of type "+this.getClass().getName()+"; ";
res += "isEmpty "+(isEmpty()?"yes":"no")+"; ";
- res += "cellCount "+getCellsCount()+"; ";
+ res += "cellsCount "+getCellsCount()+"; ";
res += "cellsSize "+keySize()+"; ";
- res += "heapOverhead "+heapOverhead()+"; ";
+ res += "totalHeapSize "+heapSize()+"; ";
res += "Min ts "+getMinTimestamp()+"; ";
return res;
}

Store.java

@@ -291,6 +291,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
/**
* @return The size of this store's memstore, in bytes
* @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfMemStore()} instead.
+ * <p>
+ * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which
+ * is in off heap MSLAB area.
*/
@Deprecated
long getMemStoreSize();
@@ -305,6 +308,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
* {@link #getMemStoreSize()} unless we are carrying snapshots and then it will be the size of
* outstanding snapshots.
* @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeToFlush()} instead.
+ * <p>
+ * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which
+ * is in off heap MSLAB area.
*/
@Deprecated
long getFlushableSize();
@@ -320,6 +326,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
* Returns the memstore snapshot size
* @return size of the memstore snapshot
* @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfSnapshot()} instead.
+ * <p>
+ * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which
+ * is in off heap MSLAB area.
*/
@Deprecated
long getSnapshotSize();

TestCompactingMemStore.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
- import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
@@ -563,9 +562,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// test 1 bucket
int totalCellsLen = addRowsByKeys(memstore, keys1);
- long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ int oneCellOnCSLMHeapSize = 120;
+ int oneCellOnCAHeapSize = 88;
+ long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
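The tests now pin the per-cell heap cost as plain literals: with the fixed-size rows inserted here, one cell in the CSLM-backed active segment occupies 120 bytes of heap and the same cell occupies 88 bytes once flattened into a CellArrayMap. Unlike the old overhead-only expressions, these figures include the cell's own on-heap data, and the 32-byte gap between them isolates the skip-list entry versus flat array slot difference. A hedged sanity check, assuming those literals:

// 120 and 88 are the values used throughout these tests; POJO and data sizes
// are identical in both layouts, so the delta is the map entry overhead.
int oneCellOnCSLMHeapSize = 120; // per cell while in the ConcurrentSkipListMap
int oneCellOnCAHeapSize = 88;    // per cell after flattening into a CellArrayMap
assert oneCellOnCSLMHeapSize - oneCellOnCAHeapSize == 32;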
@@ -573,9 +574,9 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
// totalCellsLen
totalCellsLen = (totalCellsLen * 3) / 4;
- totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
+ totalHeapSize = 3 * oneCellOnCAHeapSize;
assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
size = memstore.getFlushableSize();
MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -599,10 +600,12 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
String[] keys2 = { "A", "B", "D" };
int totalCellsLen1 = addRowsByKeys(memstore, keys1);
- long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ int oneCellOnCSLMHeapSize = 120;
+ int oneCellOnCAHeapSize = 88;
+ long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -616,21 +619,21 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// totalCellsLen
totalCellsLen1 = (totalCellsLen1 * 3) / 4;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
- totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ totalHeapSize = 3 * oneCellOnCAHeapSize;
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
int totalCellsLen2 = addRowsByKeys(memstore, keys2);
- totalHeapOverhead += 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ totalHeapSize += 3 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
size = memstore.getFlushableSize();
((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
assertEquals(0, memstore.getSnapshot().getCellsCount());
totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
- totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ totalHeapSize = 4 * oneCellOnCAHeapSize;
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
size = memstore.getFlushableSize();
MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -655,9 +658,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
String[] keys3 = { "D", "B", "B" };
int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells.
+ int oneCellOnCSLMHeapSize = 120;
+ int oneCellOnCAHeapSize = 88;
assertEquals(totalCellsLen1, region.getMemstoreSize());
- long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -668,16 +673,14 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
totalCellsLen1 = (totalCellsLen1 * 3) / 4;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
// In memory flush to make a CellArrayMap instead of CSLM. See the overhead diff.
- totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ totalHeapSize = 3 * oneCellOnCAHeapSize;
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
int totalCellsLen2 = addRowsByKeys(memstore, keys2);// Adding 3 more cells.
- long totalHeapOverhead2 = 3
- * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead + totalHeapOverhead2,
- ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(totalHeapSize + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
((CompactingMemStore) memstore).disableCompaction();
size = memstore.getFlushableSize();
@@ -685,23 +688,18 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
assertEquals(0, memstore.getSnapshot().getCellsCount());
// No change in the cells data size. ie. memstore size. as there is no compaction.
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead + totalHeapOverhead2,
- ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(totalHeapSize + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
int totalCellsLen3 = addRowsByKeys(memstore, keys3);// 3 more cells added
assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
regionServicesForStores.getMemstoreSize());
- long totalHeapOverhead3 = 3
- * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
- assertEquals(totalHeapOverhead + totalHeapOverhead2 + totalHeapOverhead3,
- ((CompactingMemStore) memstore).heapOverhead());
+ long totalHeapSize3 = 3 * oneCellOnCSLMHeapSize;
+ assertEquals(totalHeapSize + totalHeapSize2 + totalHeapSize3,
+ ((CompactingMemStore) memstore).heapSize());
((CompactingMemStore)memstore).enableCompaction();
size = memstore.getFlushableSize();
((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) {
Threads.sleep(10);
}
assertEquals(0, memstore.getSnapshot().getCellsCount());
// active flushed to pipeline and all 3 segments compacted. Will get rid of duplicated cells.
// Out of total 10, only 4 cells are unique
@@ -710,8 +708,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
regionServicesForStores.getMemstoreSize());
// Only 4 unique cells left
- assertEquals(4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY),
- ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(4 * oneCellOnCAHeapSize, ((CompactingMemStore) memstore).heapSize());
size = memstore.getFlushableSize();
MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -721,15 +718,13 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
assertEquals(0, regionServicesForStores.getMemstoreSize());
memstore.clearSnapshot(snapshot.getId());
//assertTrue(tstStr, false);
}
private int addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
byte[] fam = Bytes.toBytes("testfamily");
byte[] qf = Bytes.toBytes("testqualifier");
long size = hmc.getActive().keySize();
- long heapOverhead = hmc.getActive().heapOverhead();
+ long heapOverhead = hmc.getActive().heapSize();
int totalLen = 0;
for (int i = 0; i < keys.length; i++) {
long timestamp = System.currentTimeMillis();
@@ -742,7 +737,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp());
}
regionServicesForStores.addMemstoreSize(new MemstoreSize(hmc.getActive().keySize() - size,
- hmc.getActive().heapOverhead() - heapOverhead));
+ hmc.getActive().heapSize() - heapOverhead));
return totalLen;
}

TestCompactingToCellArrayMapMemStore.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
- import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.Threads;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -80,23 +79,22 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
// test 1 bucket
long totalCellsLen = addRowsByKeys(memstore, keys1);
- long totalHeapOverhead = 4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ int oneCellOnCSLMHeapSize = 120;
+ int oneCellOnCAHeapSize = 88;
+ long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
assertEquals(4, memstore.getActive().getCellsCount());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
Threads.sleep(10);
}
assertEquals(0, memstore.getSnapshot().getCellsCount());
// One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
// totalCellsLen
totalCellsLen = (totalCellsLen * 3) / 4;
assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
- totalHeapOverhead = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
- assertEquals(totalHeapOverhead, ((CompactingMemStore)memstore).heapOverhead());
+ totalHeapSize = 3 * oneCellOnCAHeapSize;
+ assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
for ( Segment s : memstore.getSegments()) {
counter += s.getCellsCount();
}
@@ -117,16 +115,14 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
String[] keys2 = { "A", "B", "D" };
long totalCellsLen1 = addRowsByKeys(memstore, keys1);
- long totalHeapOverhead1 = 4
- * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ int oneCellOnCSLMHeapSize = 120;
+ int oneCellOnCAHeapSize = 88;
+ long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
Threads.sleep(1000);
}
int counter = 0;
for ( Segment s : memstore.getSegments()) {
counter += s.getCellsCount();
@@ -136,27 +132,17 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
// One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
// totalCellsLen
totalCellsLen1 = (totalCellsLen1 * 3) / 4;
- totalHeapOverhead1 = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
+ totalHeapSize1 = 3 * oneCellOnCAHeapSize;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
long totalCellsLen2 = addRowsByKeys(memstore, keys2);
- long totalHeapOverhead2 = 3
- * (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
+ long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
- assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
- ((CompactingMemStore) memstore).heapOverhead());
+ assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
size = memstore.getFlushableSize();
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
- int i = 0;
while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
Threads.sleep(10);
- if (i > 10000000) {
- ((CompactingMemStore) memstore).debug();
- assertTrue("\n\n<<< Infinite loop! :( \n", false);
- }
}
assertEquals(0, memstore.getSnapshot().getCellsCount());
counter = 0;
for ( Segment s : memstore.getSegments()) {
@ -165,9 +151,8 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
assertEquals(4, counter);
totalCellsLen2 = totalCellsLen2 / 3; // 2 cells duplicated in set 2
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
totalHeapOverhead2 = 1 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
((CompactingMemStore) memstore).heapOverhead());
totalHeapSize2 = 1 * oneCellOnCAHeapSize;
assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
size = memstore.getFlushableSize();
MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@ -186,48 +171,42 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
String[] keys3 = { "D", "B", "B" };
long totalCellsLen1 = addRowsByKeys(memstore, keys1);
long totalHeapOverhead1 = 4
* (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
int oneCellOnCSLMHeapSize = 120;
int oneCellOnCAHeapSize = 88;
long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1, region.getMemstoreSize());
assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
MemstoreSize size = memstore.getFlushableSize();
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
Threads.sleep(10);
}
assertEquals(0, memstore.getSnapshot().getCellsCount());
// One cell is duplicated and the compaction will remove it. All cells are the
// same size, so adjust totalCellsLen accordingly.
totalCellsLen1 = (totalCellsLen1 * 3) / 4;
totalHeapOverhead1 = 3 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY);
totalHeapSize1 = 3 * oneCellOnCAHeapSize;
assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
assertEquals(totalHeapOverhead1, ((CompactingMemStore) memstore).heapOverhead());
assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
long totalCellsLen2 = addRowsByKeys(memstore, keys2);
long totalHeapOverhead2 = 3
* (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
((CompactingMemStore) memstore).heapOverhead());
assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
((CompactingMemStore) memstore).disableCompaction();
size = memstore.getFlushableSize();
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction
assertEquals(0, memstore.getSnapshot().getCellsCount());
assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
assertEquals(totalHeapOverhead1 + totalHeapOverhead2,
((CompactingMemStore) memstore).heapOverhead());
assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
long totalCellsLen3 = addRowsByKeys(memstore, keys3);
long totalHeapOverhead3 = 3
* (KeyValue.FIXED_OVERHEAD + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
long totalHeapSize3 = 3 * oneCellOnCSLMHeapSize;
assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
regionServicesForStores.getMemstoreSize());
assertEquals(totalHeapOverhead1 + totalHeapOverhead2 + totalHeapOverhead3,
((CompactingMemStore) memstore).heapOverhead());
assertEquals(totalHeapSize1 + totalHeapSize2 + totalHeapSize3,
((CompactingMemStore) memstore).heapSize());
((CompactingMemStore) memstore).enableCompaction();
size = memstore.getFlushableSize();
@ -243,8 +222,7 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
regionServicesForStores.getMemstoreSize());
// Only 4 unique cells left
assertEquals(4 * (KeyValue.FIXED_OVERHEAD + ClassSize.CELL_ARRAY_MAP_ENTRY),
((CompactingMemStore) memstore).heapOverhead());
assertEquals(4 * oneCellOnCAHeapSize, ((CompactingMemStore) memstore).heapSize());
size = memstore.getFlushableSize();
MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot


@ -122,8 +122,6 @@ public class TestDefaultMemStore {
this.memstore.add(kv, sizeChangeForSecondCell);
// make sure memstore size increase won't double-count MSLAB chunk size
assertEquals(Segment.getCellLength(kv), sizeChangeForFirstCell.getDataSize());
assertEquals(this.memstore.active.heapOverheadChange(kv, true),
sizeChangeForFirstCell.getHeapOverhead());
Segment segment = this.memstore.getActive();
MemStoreLAB msLab = segment.getMemStoreLAB();
if (msLab != null) {
@ -137,7 +135,7 @@ public class TestDefaultMemStore {
} else {
// make sure no memstore size change w/o MSLAB
assertEquals(0, sizeChangeForSecondCell.getDataSize());
assertEquals(0, sizeChangeForSecondCell.getHeapOverhead());
assertEquals(0, sizeChangeForSecondCell.getHeapSize());
}
}
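// The assertions above follow the patch's accounting model: MemstoreSize now
// carries a data size plus a single heap size instead of a separate heap
// overhead. A minimal sketch of that shape, with names assumed from the
// getters these tests exercise:
class MemstoreSizeSketch {
private final long dataSize; // sum of cell payload lengths
private final long heapSize; // payload on heap plus object and map-entry overhead
MemstoreSizeSketch(long dataSize, long heapSize) {
this.dataSize = dataSize;
this.heapSize = heapSize;
}
long getDataSize() { return dataSize; }
long getHeapSize() { return heapSize; }
}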


@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryType;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
@ -928,7 +927,7 @@ public class TestHeapMemoryManager {
}
@Override
public long getGlobalMemstoreHeapOverhead() {
public long getGlobalMemstoreHeapSize() {
return testMemstoreSize;
}


@ -193,7 +193,8 @@ public class TestPerColumnFamilyFlush {
// We should have cleared out only CF1, since we chose the flush thresholds
// and number of puts accordingly.
assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSize);
assertEquals(0, cf1MemstoreSize.getDataSize());
assertEquals(0, cf1MemstoreSize.getHeapSize());
// Nothing should have happened to CF2, ...
assertEquals(cf2MemstoreSize, oldCF2MemstoreSize);
// ... or CF3
@ -229,8 +230,10 @@ public class TestPerColumnFamilyFlush {
.getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
// CF1 and CF2, both should be absent.
assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSize);
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSize);
assertEquals(0, cf1MemstoreSize.getDataSize());
assertEquals(0, cf1MemstoreSize.getHeapSize());
assertEquals(0, cf2MemstoreSize.getDataSize());
assertEquals(0, cf2MemstoreSize.getHeapSize());
// CF3 shouldn't have been touched.
assertEquals(cf3MemstoreSize, oldCF3MemstoreSize);
assertEquals(totalMemstoreSize, cf3MemstoreSize.getDataSize());
@ -310,9 +313,12 @@ public class TestPerColumnFamilyFlush {
region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
// Everything should have been cleared
assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSize);
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSize);
assertEquals(MemstoreSize.EMPTY_SIZE, cf3MemstoreSize);
assertEquals(0, cf1MemstoreSize.getDataSize());
assertEquals(0, cf1MemstoreSize.getHeapSize());
assertEquals(0, cf2MemstoreSize.getDataSize());
assertEquals(0, cf2MemstoreSize.getHeapSize());
assertEquals(0, cf3MemstoreSize.getDataSize());
assertEquals(0, cf3MemstoreSize.getHeapSize());
assertEquals(0, totalMemstoreSize);
assertEquals(HConstants.NO_SEQNUM, smallestSeqInRegionCurrentMemstore);
HBaseTestingUtility.closeRegionAndWAL(region);
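// Field-wise zero checks like those above replace equality against the old
// MemstoreSize.EMPTY_SIZE constant throughout this patch. A small helper of
// this shape (hypothetical, not part of the patch) would keep them in one place:
static void assertEmptyMemstoreSize(MemstoreSize size) {
assertEquals(0, size.getDataSize());
assertEquals(0, size.getHeapSize());
}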


@ -245,7 +245,8 @@ public class TestStore {
Assert.assertEquals(kvSize2, size);
flushStore(store, id++);
size = store.memstore.getFlushableSize();
Assert.assertEquals(MemstoreSize.EMPTY_SIZE, size);
assertEquals(0, size.getDataSize());
assertEquals(0, size.getHeapSize());
return null;
}
});


@ -232,15 +232,16 @@ public class TestWalAndCompactingMemStoreFlush {
// CF1 was flushed to memory, but there is nothing to compact, and CF1 was flattened
assertTrue(cf1MemstoreSizePhaseII.getDataSize() == cf1MemstoreSizePhaseI.getDataSize());
assertTrue(cf1MemstoreSizePhaseII.getHeapOverhead() < cf1MemstoreSizePhaseI.getHeapOverhead());
assertTrue(cf1MemstoreSizePhaseII.getHeapSize() < cf1MemstoreSizePhaseI.getHeapSize());
// CF2 should become empty
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
// verify that CF3 was flushed to memory and was compacted (this is an approximation check)
assertTrue(cf3MemstoreSizePhaseI.getDataSize() > cf3MemstoreSizePhaseII.getDataSize());
assertTrue(
cf3MemstoreSizePhaseI.getHeapOverhead() / 2 > cf3MemstoreSizePhaseII.getHeapOverhead());
cf3MemstoreSizePhaseI.getHeapSize() / 2 > cf3MemstoreSizePhaseII.getHeapSize());
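// Why the flatten checks hold: flattening swaps the skip-list index for a
// cell-array index, so the payload (data size) is untouched while per-cell
// index overhead shrinks. A sketch with the same assumed per-cell figures as
// the memstore tests above (120 bytes on CSLM, 88 on CellArrayMap):
long cellsInCf1 = 4; // hypothetical cell count
long heapBeforeFlatten = cellsInCf1 * 120;
long heapAfterFlatten = cellsInCf1 * 88;
assertTrue(heapAfterFlatten < heapBeforeFlatten);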
// Now the smallest LSN in the region should be the same as the smallest
// LSN in the memstore of CF1.
@ -295,7 +296,8 @@ public class TestWalAndCompactingMemStoreFlush {
// CF1's pipeline component (inserted before first flush) should be flushed to disk
// CF2 should be flushed to disk
assertTrue(cf1MemstoreSizePhaseIII.getDataSize() > cf1MemstoreSizePhaseIV.getDataSize());
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseIV);
assertEquals(0, cf2MemstoreSizePhaseIV.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseIV.getHeapSize());
// CF3 shouldn't have been touched.
assertEquals(cf3MemstoreSizePhaseIV, cf3MemstoreSizePhaseII);
@ -318,9 +320,12 @@ public class TestWalAndCompactingMemStoreFlush {
long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
.getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
assertEquals(MemstoreSize.EMPTY_SIZE , cf1MemstoreSizePhaseV);
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseV);
assertEquals(MemstoreSize.EMPTY_SIZE, cf3MemstoreSizePhaseV);
assertEquals(0, cf1MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf1MemstoreSizePhaseV.getHeapSize());
assertEquals(0, cf2MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseV.getHeapSize());
assertEquals(0, cf3MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf3MemstoreSizePhaseV.getHeapSize());
// What happens when we hit the memstore limit, but we are not able to find
// any Column Family above the threshold?
@ -463,14 +468,14 @@ public class TestWalAndCompactingMemStoreFlush {
/* PHASE II - validation */
// CF1 was flushed to memory, should be flattened and take less space
assertEquals(cf1MemstoreSizePhaseII.getDataSize(), cf1MemstoreSizePhaseI.getDataSize());
assertTrue(cf1MemstoreSizePhaseII.getHeapOverhead() < cf1MemstoreSizePhaseI.getHeapOverhead());
assertTrue(cf1MemstoreSizePhaseII.getHeapSize() < cf1MemstoreSizePhaseI.getHeapSize());
// CF2 should become empty
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
// verify that CF3 was flushed to memory and was not compacted (this is an approximation check):
// if it had been compacted, CF3 would be at most half the size, because every key was duplicated
assertEquals(cf3MemstoreSizePhaseII.getDataSize(), cf3MemstoreSizePhaseI.getDataSize());
assertTrue(
cf3MemstoreSizePhaseI.getHeapOverhead() / 2 < cf3MemstoreSizePhaseII.getHeapOverhead());
assertTrue(cf3MemstoreSizePhaseI.getHeapSize() / 2 < cf3MemstoreSizePhaseII.getHeapSize());
// Now the smallest LSN in the region should be the same as the smallest
// LSN in the memstore of CF1.
@ -533,7 +538,8 @@ public class TestWalAndCompactingMemStoreFlush {
// CF1's biggest pipeline component (inserted before first flush) should be flushed to disk
// CF2 should remain empty
assertTrue(cf1MemstoreSizePhaseIII.getDataSize() > cf1MemstoreSizePhaseIV.getDataSize());
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseIV);
assertEquals(0, cf2MemstoreSizePhaseIV.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseIV.getHeapSize());
// CF3 shouldn't have been touched.
assertEquals(cf3MemstoreSizePhaseIV, cf3MemstoreSizePhaseII);
// the smallest LSN of CF3 shouldn't change
@ -561,9 +567,12 @@ public class TestWalAndCompactingMemStoreFlush {
/*------------------------------------------------------------------------------*/
/* PHASE V - validation */
assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSizePhaseV);
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseV);
assertEquals(MemstoreSize.EMPTY_SIZE, cf3MemstoreSizePhaseV);
assertEquals(0, cf1MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf1MemstoreSizePhaseV.getHeapSize());
assertEquals(0, cf2MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseV.getHeapSize());
assertEquals(0, cf3MemstoreSizePhaseV.getDataSize());
assertEquals(0, cf3MemstoreSizePhaseV.getHeapSize());
// The total memstores size should be empty
assertEquals(0, totalMemstoreSizePhaseV);
// Because there is nothing in any memstore the WAL's LSN should be -1
@ -683,7 +692,8 @@ public class TestWalAndCompactingMemStoreFlush {
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
// CF2 should have been cleared
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
String s = "\n\n----------------------------------\n"
+ "Upon initial insert and flush, LSN of CF1 is:"
@ -816,7 +826,8 @@ public class TestWalAndCompactingMemStoreFlush {
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
// CF2 should have been cleared
assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
// Add same entries to compact them later
for (int i = 1; i <= 1200; i++) {