HBASE-15950 Fix memstore size estimates to be tighter
commit bff35d6387 (parent 5f9d1a71c8)
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.RawComparator;

import com.google.common.annotations.VisibleForTesting;

/**

@@ -2598,11 +2597,18 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
    int sum = 0;
    sum += ClassSize.OBJECT;// the KeyValue object itself
    sum += ClassSize.REFERENCE;// pointer to "bytes"
    sum += ClassSize.align(ClassSize.ARRAY);// "bytes"
    sum += ClassSize.align(length);// number of bytes of data in the "bytes" array
    sum += 2 * Bytes.SIZEOF_INT;// offset, length
    sum += Bytes.SIZEOF_LONG;// memstoreTS
    return ClassSize.align(sum);

    /*
     * Deep object overhead for this KV consists of two parts. The first part is the KV object
     * itself, while the second part is the backing byte[]. We only count the array overhead
     * from the byte[] if this is the first KV in there.
     */
    return ClassSize.align(sum) +
        (offset == 0
          ? ClassSize.sizeOf(bytes, length) // count both length and object overhead
          : length); // only count the number of bytes
  }

  /**
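
The new return statement charges the backing array's overhead only to the KV that starts at offset 0; every later KV sharing the same byte[] (the MSLAB case) adds just its own data length. A minimal sketch of that accounting, assuming a 64-bit JVM with CompressedOops (object header 12, reference 4, array header 16); these constants are illustrative, not read from ClassSize:

    public class KvHeapSizeSketch {
      // Mirrors the shape of the new heapSize() formula with assumed layout constants.
      static long kvHeapSize(int offset, int length) {
        int object = 12, reference = 4, sizeofInt = 4, sizeofLong = 8, arrayHeader = 16;
        long sum = object            // the KeyValue object itself
            + reference              // pointer to "bytes"
            + 2 * sizeofInt          // offset, length
            + sizeofLong;            // memstoreTS
        return align(sum) + (offset == 0
            ? align(arrayHeader + length)  // first KV also pays the backing array's overhead
            : length);                     // later KVs only pay for their own bytes
      }

      static long align(long num) {
        return ((num + 7) >> 3) << 3;  // same rounding ClassSize uses
      }

      // Three 40-byte KVs packed into one byte[]:
      //   kvHeapSize(0, 40)  -> align(32) + align(56) = 88
      //   kvHeapSize(40, 40) -> align(32) + 40        = 72
      //   kvHeapSize(80, 40) -> align(32) + 40        = 72
    }
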
@@ -39,8 +39,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
public class ClassSize {
  private static final Log LOG = LogFactory.getLog(ClassSize.class);

  private static int nrOfRefsPerObj = 2;

  /** Array overhead */
  public static final int ARRAY;

@@ -126,35 +124,125 @@ public class ClassSize {
    JDK7 = major == 1 && minor == 7;
  }

  /**
   * MemoryLayout abstracts details about the JVM object layout. Default implementation is used in
   * case Unsafe is not available.
   */
  private static class MemoryLayout {
    int headerSize() {
      return 2 * oopSize();
    }

    int arrayHeaderSize() {
      return (int) align(3 * oopSize());
    }

    /**
     * Return the size of an "ordinary object pointer". Either 4 or 8, depending on 32/64 bit,
     * and CompressedOops
     */
    int oopSize() {
      return is32BitJVM() ? 4 : 8;
    }

    /**
     * Aligns a number to 8.
     * @param num number to align to 8
     * @return smallest number >= input that is a multiple of 8
     */
    public long align(long num) {
      //The 7 comes from that the alignSize is 8 which is the number of bytes
      //stored and sent together
      return ((num + 7) >> 3) << 3;
    }

    long sizeOf(byte[] b, int len) {
      return align(arrayHeaderSize() + len);
    }
  }

  /**
   * UnsafeLayout uses Unsafe to guesstimate the object-layout related parameters like object header
   * sizes and oop sizes
   * See HBASE-15950.
   */
  private static class UnsafeLayout extends MemoryLayout {
    @SuppressWarnings("unused")
    private static final class HeaderSize {
      private byte a;
    }

    public UnsafeLayout() {
    }

    @Override
    int headerSize() {
      try {
        return (int) UnsafeAccess.theUnsafe.objectFieldOffset(
          HeaderSize.class.getDeclaredField("a"));
      } catch (NoSuchFieldException | SecurityException e) {
        LOG.error(e);
      }
      return super.headerSize();
    }

    @Override
    int arrayHeaderSize() {
      return UnsafeAccess.theUnsafe.arrayBaseOffset(byte[].class);
    }

    @Override
    @SuppressWarnings("static-access")
    int oopSize() {
      // Unsafe.addressSize() returns 8, even with CompressedOops. This is how many bytes each
      // element is allocated in an Object[].
      return UnsafeAccess.theUnsafe.ARRAY_OBJECT_INDEX_SCALE;
    }

    @Override
    @SuppressWarnings("static-access")
    long sizeOf(byte[] b, int len) {
      return align(arrayHeaderSize() + len * UnsafeAccess.theUnsafe.ARRAY_BYTE_INDEX_SCALE);
    }
  }

  private static MemoryLayout getMemoryLayout() {
    // Have a safeguard in case Unsafe estimate is wrong. This is static context, there is
    // no configuration, so we look at System property.
    String enabled = System.getProperty("hbase.memorylayout.use.unsafe");
    if (UnsafeAvailChecker.isAvailable() && (enabled == null || Boolean.parseBoolean(enabled))) {
      LOG.debug("Using Unsafe to estimate memory layout");
      return new UnsafeLayout();
    }
    LOG.debug("Not using Unsafe to estimate memory layout");
    return new MemoryLayout();
  }

  private static final MemoryLayout memoryLayout = getMemoryLayout();

  /**
   * Method for reading the arc settings and setting overheads according
   * to 32-bit or 64-bit architecture.
   */
  static {
    //Default value is set to 8, covering the case when arcModel is unknown
    if (is32BitJVM()) {
      REFERENCE = 4;
    } else {
      REFERENCE = 8;
    }
    REFERENCE = memoryLayout.oopSize();

    OBJECT = 2 * REFERENCE;
    OBJECT = memoryLayout.headerSize();

    ARRAY = align(3 * REFERENCE);
    ARRAY = memoryLayout.arrayHeaderSize();

    ARRAYLIST = align(OBJECT + align(REFERENCE) + align(ARRAY) +
        (2 * Bytes.SIZEOF_INT));
    ARRAYLIST = align(OBJECT + REFERENCE + (2 * Bytes.SIZEOF_INT)) + align(ARRAY);

    //noinspection PointlessArithmeticExpression
    BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) +
    BYTE_BUFFER = align(OBJECT + REFERENCE +
        (5 * Bytes.SIZEOF_INT) +
        (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG);
        (3 * Bytes.SIZEOF_BOOLEAN) + Bytes.SIZEOF_LONG) + align(ARRAY);

    INTEGER = align(OBJECT + Bytes.SIZEOF_INT);

    MAP_ENTRY = align(OBJECT + 5 * REFERENCE + Bytes.SIZEOF_BOOLEAN);

    TREEMAP = align(OBJECT + (2 * Bytes.SIZEOF_INT) + align(7 * REFERENCE));
    TREEMAP = align(OBJECT + (2 * Bytes.SIZEOF_INT) + 7 * REFERENCE);

    // STRING is different size in jdk6 and jdk7. Just use what we estimate as size rather than
    // have a conditional on whether jdk7.
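
UnsafeLayout measures the same three numbers the default MemoryLayout guesses: the object header size is the field offset of the first declared field of a one-byte class, the array header size is the base offset of byte[], and the oop (reference) size is the element scale of Object[], which is 4 under CompressedOops even on a 64-bit JVM. Running with -Dhbase.memorylayout.use.unsafe=false falls back to the default MemoryLayout. A self-contained probe sketch of the same technique, using plain sun.misc.Unsafe via reflection rather than HBase's UnsafeAccess; the class and field names here are illustrative only:

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    public class LayoutProbe {
      static final class HeaderSize { byte a; }  // first field sits right after the header

      public static void main(String[] args) throws Exception {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        // offset of the first instance field == object header size (mark word + class pointer)
        long header = unsafe.objectFieldOffset(HeaderSize.class.getDeclaredField("a"));
        // base offset of byte[] == array header size (object header + 4-byte length)
        int arrayHeader = unsafe.arrayBaseOffset(byte[].class);
        // bytes per element of Object[] == reference ("oop") size
        int oopSize = unsafe.arrayIndexScale(Object[].class);

        System.out.println("header=" + header + " arrayHeader=" + arrayHeader + " oop=" + oopSize);
      }
    }

On a typical 64-bit HotSpot with CompressedOops this prints header=12, arrayHeader=16, oop=4, which is exactly what makes the new constants tighter than the old fixed REFERENCE=8 / OBJECT=16 / ARRAY=24 values.
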
@@ -174,9 +262,9 @@ public class ClassSize {
    // The size changes from jdk7 to jdk8, estimate the size rather than use a conditional
    CONCURRENT_SKIPLISTMAP = (int) estimateBase(ConcurrentSkipListMap.class, false);

    CONCURRENT_SKIPLISTMAP_ENTRY = align(
    CONCURRENT_SKIPLISTMAP_ENTRY =
        align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
        align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */
        align((OBJECT + (3 * REFERENCE))/2); /* one index per two entries */

    REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));

@@ -218,8 +306,7 @@ public class ClassSize {
  private static int [] getSizeCoefficients(Class cl, boolean debug) {
    int primitives = 0;
    int arrays = 0;
    //The number of references that a new object takes
    int references = nrOfRefsPerObj;
    int references = 0;
    int index = 0;

    for ( ; null != cl; cl = cl.getSuperclass()) {
@@ -275,15 +362,14 @@ public class ClassSize {
   * @return the size estimate, in bytes
   */
  private static long estimateBaseFromCoefficients(int [] coeff, boolean debug) {
    long prealign_size = coeff[0] + align(coeff[1] * ARRAY) + coeff[2] * REFERENCE;
    long prealign_size = OBJECT + coeff[0] + coeff[2] * REFERENCE;

    // Round up to a multiple of 8
    long size = align(prealign_size);
    if(debug) {
    long size = align(prealign_size) + align(coeff[1] * ARRAY);
    if (debug) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Primitives=" + coeff[0] + ", arrays=" + coeff[1] +
            ", references(includes " + nrOfRefsPerObj +
            " for object overhead)=" + coeff[2] + ", refSize " + REFERENCE +
            ", references=" + coeff[2] + ", refSize " + REFERENCE +
            ", size=" + size + ", prealign_size=" + prealign_size);
      }
    }
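
With the object header now counted once as OBJECT (instead of two phantom references) and each array field aligned separately, the new formula for a hypothetical class with 12 bytes of primitives, 1 array field and 2 reference fields works out as follows, assuming OBJECT = 12, REFERENCE = 4 and ARRAY = 16 (64-bit, CompressedOops):

    // coeff = { primitives, arrays, references } = { 12, 1, 2 }
    // prealign_size = OBJECT + coeff[0] + coeff[2] * REFERENCE  = 12 + 12 + 2 * 4 = 32
    // size          = align(prealign_size) + align(coeff[1] * ARRAY)
    //               = align(32) + align(16) = 32 + 16 = 48 bytes
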
@@ -321,9 +407,7 @@ public class ClassSize {
   * @return smallest number >= input that is a multiple of 8
   */
  public static long align(long num) {
    //The 7 comes from that the alignSize is 8 which is the number of bytes
    //stored and sent together
    return ((num + 7) >> 3) << 3;
    return memoryLayout.align(num);
  }

  /**
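
The alignment simply rounds up to the next multiple of 8; working through the shift expression:

    // align(13) = ((13 + 7) >> 3) << 3 = (20 >> 3) << 3 = 2 << 3 = 16
    // align(16) = ((16 + 7) >> 3) << 3 = (23 >> 3) << 3 = 2 << 3 = 16
    // align(17) = ((17 + 7) >> 3) << 3 = (24 >> 3) << 3 = 3 << 3 = 24
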
@@ -335,5 +419,9 @@ public class ClassSize {
    return model != null && model.equals("32");
  }

  public static long sizeOf(byte[] b, int len) {
    return memoryLayout.sizeOf(b, len);
  }

}
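
The new public ClassSize.sizeOf(byte[], len) is what KeyValue.heapSize() calls for the first KV in a backing array: the array header plus the used bytes, rounded up. A usage sketch, assuming a 64-bit JVM with CompressedOops where the new ClassSize.ARRAY comes out to 16:

    // byte[] payload = ...; only the first `len` bytes are attributed to this cell
    // ClassSize.sizeOf(payload, 7)  -> align(16 + 7)  = 24
    // ClassSize.sizeOf(payload, 40) -> align(16 + 40) = 56
    // The pre-patch KeyValue math charged align(ARRAY) + align(len) to every KV, with ARRAY
    // fixed at 24 on 64-bit JVMs: 24 + 8 = 32 and 24 + 40 = 64 respectively, and it paid
    // that array overhead again for every KV sharing one MSLAB chunk.
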
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;

import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
@@ -5262,8 +5262,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
  @Override
  public void releaseRowLocks(List<RowLock> rowLocks) {
    if (rowLocks != null) {
      for (RowLock rowLock : rowLocks) {
        rowLock.release();
      for (int i = 0; i < rowLocks.size(); i++) {
        rowLocks.get(i).release();
      }
      rowLocks.clear();
    }
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
import org.apache.hadoop.hbase.filter.RandomRowFilter;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -362,7 +363,7 @@ public class TestPartialResultsFromClientSide {
    if (CELL_HEAP_SIZE == -1) {
      // Do a partial scan that will return a single result with a single cell
      Scan scan = new Scan();
      scan.setMaxResultSize(1);
      scan.setMaxResultSize(2);
      scan.setAllowPartialResults(true);
      ResultScanner scanner = TABLE.getScanner(scan);

@@ -372,7 +373,11 @@ public class TestPartialResultsFromClientSide {
      assertTrue(result.rawCells() != null);
      assertTrue(result.rawCells().length == 1);

      CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
      // Estimate the cell heap size. One difference is that on server side, the KV Heap size is
      // estimated differently in case the cell is backed up by MSLAB byte[] (no overhead for
      // backing array). Thus below calculation is a bit brittle.
      CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0])
          - (ClassSize.ARRAY+3);
      if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE);
      scanner.close();
    }
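
Roughly why the subtraction is (ClassSize.ARRAY + 3), assuming the client-side cell is the first (offset 0) KV in its own backing array while the server-side MSLAB-backed copy counts only its data length:

    // clientHeapSize = align(fixedPart) + align(ClassSize.ARRAY + length)
    // serverHeapSize = align(fixedPart) + length            // MSLAB-backed, no array overhead
    // difference     = ClassSize.ARRAY + (align(length) - length)
    //                = ClassSize.ARRAY + 3   when this test's cell length needs 3 bytes of padding
    // which is exactly the brittleness the comment above warns about.
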
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.util.ClassSize;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;

@@ -57,6 +56,7 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Testing the sizing that HeapSize offers and compares to the size given by
@@ -68,17 +68,17 @@ public class TestHeapSize {
  // List of classes implementing HeapSize
  // BatchOperation, BatchUpdate, BlockIndex, Entry, Entry<K,V>, HStoreKey
  // KeyValue, LruBlockCache, LruHashMap<K,V>, Put, WALKey

  @BeforeClass
  public static void beforeClass() throws Exception {
    // Print detail on jvm so we know what is different should below test fail.
    RuntimeMXBean b = ManagementFactory.getRuntimeMXBean();
    LOG.info("name=" + b.getName());
    LOG.info("specname=" + b.getSpecName());
    LOG.info("specvendor=" + b.getSpecVendor());
    LOG.info("name=" + b.getName());
    LOG.info("specname=" + b.getSpecName());
    LOG.info("specvendor=" + b.getSpecVendor());
    LOG.info("vmname=" + b.getVmName());
    LOG.info("vmversion=" + b.getVmVersion());
    LOG.info("vmvendor=" + b.getVmVendor());
    LOG.info("vmversion=" + b.getVmVersion());
    LOG.info("vmvendor=" + b.getVmVendor());
    Map<String, String> p = b.getSystemProperties();
    LOG.info("properties=" + p);
  }
@@ -132,7 +132,7 @@ public class TestHeapSize {
    // Object
    cl = Object.class;
    expected = ClassSize.estimateBase(cl, false);
    actual = ClassSize.OBJECT;
    actual = ClassSize.align(ClassSize.OBJECT);
    if(expected != actual) {
      ClassSize.estimateBase(cl, true);
      assertEquals(expected, actual);
@@ -390,5 +390,25 @@ public class TestHeapSize {
    }
  }

  @Test
  public void testReferenceSize() {
    LOG.info("ClassSize.REFERENCE is " + ClassSize.REFERENCE);
    // oop should be either 4 or 8
    assertTrue(ClassSize.REFERENCE == 4 || ClassSize.REFERENCE == 8);
  }

  @Test
  public void testObjectSize() throws IOException {
    LOG.info("header:" + ClassSize.OBJECT);
    LOG.info("array header:" + ClassSize.ARRAY);

    if (ClassSize.is32BitJVM()) {
      assertEquals(ClassSize.OBJECT, 8);
    } else {
      assertTrue(ClassSize.OBJECT == 12 || ClassSize.OBJECT == 16); // depending on CompressedOops
    }
    assertEquals(ClassSize.OBJECT + 4, ClassSize.ARRAY);
  }

}
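
The expected numbers follow from the usual HotSpot layout: an object header is a mark word plus a class pointer, and an array header appends a 4-byte length field, which is what the ARRAY == OBJECT + 4 assertion encodes. As a hedged summary (typical HotSpot behavior, not guaranteed for every JVM):

    // 32-bit:                  OBJECT = 4 + 4 = 8,   ARRAY = 8 + 4  = 12
    // 64-bit, CompressedOops:  OBJECT = 8 + 4 = 12,  ARRAY = 12 + 4 = 16
    // 64-bit, no compression:  OBJECT = 8 + 8 = 16,  ARRAY = 16 + 4 = 20
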
@@ -823,9 +823,9 @@ public class TestHFileBlock {

  protected void testBlockHeapSizeInternals() {
    if (ClassSize.is32BitJVM()) {
      assertTrue(HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE == 64);
      assertEquals(64, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
    } else {
      assertTrue(HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE == 104);
      assertEquals(72, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
    }

    for (int size : new int[] { 100, 256, 12345 }) {
@@ -76,6 +76,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
    chunkPool.clearChunks();
  }

  @Override
  @Before
  public void setUp() throws Exception {
    super.internalSetUp();

@@ -101,6 +102,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   * @throws IOException
   * @throws CloneNotSupportedException
   */
  @Override
  @Test
  public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException {
    // we are going to the scanning across snapshot with two kvs

@@ -139,6 +141,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   * Test memstore snapshots
   * @throws IOException
   */
  @Override
  @Test
  public void testSnapshotting() throws IOException {
    final int snapshotCount = 5;

@@ -158,6 +161,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
  /** Test getNextRow from memstore
   * @throws InterruptedException
   */
  @Override
  @Test
  public void testGetNextRow() throws Exception {
    addRows(this.memstore);

@@ -205,6 +209,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
    }
  }

  @Override
  @Test
  public void testGet_memstoreAndSnapShot() throws IOException {
    byte[] row = Bytes.toBytes("testrow");

@@ -251,6 +256,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   * This causes OOME pretty quickly if we use MSLAB for upsert
   * since each 2M chunk is held onto by a single reference.
   */
  @Override
  @Test
  public void testUpsertMSLAB() throws Exception {

@@ -289,6 +295,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   *
   * @throws Exception
   */
  @Override
  @Test
  public void testUpsertMemstoreSize() throws Exception {
    long oldSize = memstore.size();

@@ -327,6 +334,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   * various edit operations in memstore.
   * @throws Exception
   */
  @Override
  @Test
  public void testUpdateToTimeOfOldestEdit() throws Exception {
    try {
@@ -581,7 +589,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {

    // test 1 bucket
    addRowsByKeys(memstore, keys1);
    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(496, regionServicesForStores.getGlobalMemstoreTotalSize());

    long size = memstore.getFlushableSize();
    ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact

@@ -589,7 +597,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(376, regionServicesForStores.getGlobalMemstoreTotalSize());

    size = memstore.getFlushableSize();
    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot

@@ -608,7 +616,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
    String[] keys2 = { "A", "B", "D" };

    addRowsByKeys(memstore, keys1);
    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());

    assertEquals(496, regionServicesForStores.getGlobalMemstoreTotalSize());

    long size = memstore.getFlushableSize();
    ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact

@@ -616,10 +625,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      Threads.sleep(1000);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(376, regionServicesForStores.getGlobalMemstoreTotalSize());

    addRowsByKeys(memstore, keys2);
    assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(752, regionServicesForStores.getGlobalMemstoreTotalSize());

    size = memstore.getFlushableSize();
    ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact

@@ -627,7 +636,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(496, regionServicesForStores.getGlobalMemstoreTotalSize());

    size = memstore.getFlushableSize();
    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
@@ -647,7 +656,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
    String[] keys3 = { "D", "B", "B" };

    addRowsByKeys(memstore, keys1);
    assertEquals(704, region.getMemstoreSize());
    assertEquals(496, region.getMemstoreSize());

    long size = memstore.getFlushableSize();
    ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact

@@ -658,7 +667,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(376, regionServicesForStores.getGlobalMemstoreTotalSize());

    addRowsByKeys(memstore, keys2);

@@ -666,16 +675,16 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
        region.getMemstoreSize() + ", Memstore Total Size: " +
        regionServicesForStores.getGlobalMemstoreTotalSize() + "\n\n";

    assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(752, regionServicesForStores.getGlobalMemstoreTotalSize());

    ((CompactingMemStore)memstore).disableCompaction();
    size = memstore.getFlushableSize();
    ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(752, regionServicesForStores.getGlobalMemstoreTotalSize());

    addRowsByKeys(memstore, keys3);
    assertEquals(1584, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(1128, regionServicesForStores.getGlobalMemstoreTotalSize());

    ((CompactingMemStore)memstore).enableCompaction();
    size = memstore.getFlushableSize();

@@ -684,7 +693,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      Threads.sleep(10);
    }
    assertEquals(0, memstore.getSnapshot().getCellsCount());
    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());
    assertEquals(496, regionServicesForStores.getGlobalMemstoreTotalSize());

    size = memstore.getFlushableSize();
    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot

@@ -701,6 +710,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
  private void addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
    byte[] fam = Bytes.toBytes("testfamily");
    byte[] qf = Bytes.toBytes("testqualifier");
    long size = hmc.getActive().getSize();
    for (int i = 0; i < keys.length; i++) {
      long timestamp = System.currentTimeMillis();
      Threads.sleep(1); // to make sure each kv gets a different ts

@@ -708,10 +718,9 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
      byte[] val = Bytes.toBytes(keys[i] + i);
      KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
      hmc.add(kv);
      LOG.debug("added kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp());
      long size = AbstractMemStore.heapSizeChange(kv, true);
      regionServicesForStores.addAndGetGlobalMemstoreSize(size);
      LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp());
    }
    regionServicesForStores.addAndGetGlobalMemstoreSize(hmc.getActive().getSize() - size);
  }

  private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge {
@@ -72,8 +72,6 @@ public class TestWalAndCompactingMemStoreFlush {
  public static final byte[] FAMILY2 = FAMILIES[1];
  public static final byte[] FAMILY3 = FAMILIES[2];

  private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
    int i=0;
    HTableDescriptor htd = new HTableDescriptor(TABLENAME);

@@ -90,8 +88,6 @@ public class TestWalAndCompactingMemStoreFlush {
    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
  }

  // A helper function to create puts.
  private Put createPut(int familyNum, int putNum) {
    byte[] qf = Bytes.toBytes("q" + familyNum);

@@ -102,7 +98,6 @@ public class TestWalAndCompactingMemStoreFlush {
    return p;
  }

  // A helper function to create double puts, so something can be compacted later.
  private Put createDoublePut(int familyNum, int putNum) {
    byte[] qf = Bytes.toBytes("q" + familyNum);

@@ -115,16 +110,12 @@ public class TestWalAndCompactingMemStoreFlush {
    return p;
  }

  // A helper function to create gets.
  private Get createGet(int familyNum, int putNum) {
    byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum);
    return new Get(row);
  }

  // A helper function to verify edits.
  void verifyEdit(int familyNum, int putNum, Table table) throws IOException {
    Result r = table.get(createGet(familyNum, putNum));

@@ -138,10 +129,6 @@ public class TestWalAndCompactingMemStoreFlush {
        Arrays.equals(r.getFamilyMap(family).get(qf), val));
  }

  @Test(timeout = 180000)
  public void testSelectiveFlushWhenEnabled() throws IOException {
@@ -150,7 +137,7 @@ public class TestWalAndCompactingMemStoreFlush {
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024);
    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class
        .getName());
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 300 *
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 200 *
        1024);
    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);

@@ -388,14 +375,6 @@ public class TestWalAndCompactingMemStoreFlush {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }

  @Test(timeout = 180000)
  public void testSelectiveFlushWhenEnabledAndWALinCompaction() throws IOException {
    // Set up the configuration

@@ -403,7 +382,7 @@ public class TestWalAndCompactingMemStoreFlush {
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024);
    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class
        .getName());
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 300 *
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 200 *
        1024);
    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);

@@ -535,10 +514,6 @@ public class TestWalAndCompactingMemStoreFlush {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }

  // Find the (first) region which has the specified name.
  private static Pair<Region, HRegionServer> getRegionWithName(TableName tableName) {
    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();

@@ -552,7 +527,6 @@ public class TestWalAndCompactingMemStoreFlush {
    return null;
  }

  private WAL getWAL(Region region) {
    return ((HRegion)region).getWAL();
  }

@@ -560,6 +534,4 @@ public class TestWalAndCompactingMemStoreFlush {
  private int getNumRolledLogFiles(Region region) {
    return ((FSHLog)getWAL(region)).getNumRolledLogFiles();
  }

}