HBASE-1607 Redo MemStore heap sizing to be accurate, testable, and more like new LruBlockCache
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@790999 13f79535-47bb-0310-9956-ffa450edef68
parent 1f85226103
commit faacd72a21
CHANGES.txt
@@ -435,6 +435,8 @@ Release 0.20.0 - Unreleased
               Watcher
    HBASE-1597  Prevent unnecessary caching of blocks during compactions
                (Jon Gray via Stack)
+   HBASE-1607  Redo MemStore heap sizing to be accurate, testable, and more
+               like new LruBlockCache (Jon Gray via Stack)
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue
RegionHistorian.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 /**
  * The Region Historian task is to keep track of every modification a region
@@ -49,8 +49,6 @@ public class RegionHistorian implements HConstants {
 
   private HTable metaTable;
 
-
-
   /** Singleton reference */
   private static RegionHistorian historian;
 
@@ -333,4 +331,7 @@ public class RegionHistorian implements HConstants {
       LOG.debug("Offlined");
     }
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + ClassSize.REFERENCE);
 }
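Aside: every FIXED_OVERHEAD constant in this commit follows the same recipe — count the object header, references, and primitive fields, then round up to the JVM's 8-byte allocation granularity. Below is a minimal sketch of that pattern; the constant values are assumed stand-ins for a 64-bit JVM, not ClassSize's runtime-derived figures.

public final class AlignSketch {
  // Illustrative assumptions only; ClassSize computes the real values at runtime.
  static final int OBJECT = 16;    // assumed object header size (64-bit JVM)
  static final int REFERENCE = 8;  // assumed reference size

  // Rounds num up to the nearest multiple of 8, mirroring ClassSize.align().
  static long align(long num) {
    return ((num + 7) >> 3) << 3;
  }

  public static void main(String[] args) {
    // RegionHistorian above counts one object header plus one reference:
    System.out.println(align(OBJECT + REFERENCE)); // prints 24
  }
}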
HLog.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.io.SequenceFile;
@@ -111,7 +112,7 @@ public class HLog implements HConstants, Syncable {
   private final int flushlogentries;
   private final AtomicInteger unflushedEntries = new AtomicInteger(0);
   private volatile long lastLogFlushTime;
 
   /*
   * Current log file.
   */
@@ -1117,4 +1118,9 @@ public class HLog implements HConstants, Syncable {
       }
     }
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
+      ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
+
 }
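A rough sanity check of the constant above: under assumed 64-bit sizes (OBJECT = 16, REFERENCE = 8, Bytes.SIZEOF_INT = 4, Bytes.SIZEOF_LONG = 8, and ATOMIC_INTEGER = align(16 + 4) = 24), HLog.FIXED_OVERHEAD works out to align(16 + 5*8 + 24 + 4 + 3*8) = align(108) = 112 bytes. The exact figures on a real JVM come from ClassSize at runtime; the point is that the constant is derived from counted fields rather than guessed.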
HRegion.java
@@ -54,9 +54,11 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.util.Progressable;
@@ -98,7 +100,7 @@ import org.apache.hadoop.util.StringUtils;
  * regionName is a unique identifier for this HRegion. (startKey, endKey]
  * defines the keyspace for this HRegion.
  */
-public class HRegion implements HConstants { // , Writable{
+public class HRegion implements HConstants, HeapSize { // , Writable{
   static final Log LOG = LogFactory.getLog(HRegion.class);
   static final String SPLITDIR = "splits";
   static final String MERGEDIR = "merges";
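The HeapSize contract that HRegion, Store, and MemStore adopt in this commit is a single-method interface. A paraphrased sketch of its shape (not the verbatim HBase source):

public interface HeapSize {
  /** @return Approximate heap usage of this object, in bytes. */
  long heapSize();
}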
@@ -2322,45 +2324,28 @@ public class HRegion implements HConstants { // , Writable{
         + " in table " + regionInfo.getTableDesc());
     }
   }
 
-  // //HBaseAdmin Debugging
-  // /**
-  //  * @return number of stores in the region
-  //  */
-  // public int getNumStores() {
-  //   return this.numStores;
-  // }
-  // /**
-  //  * @return the name of the region
-  //  */
-  // public byte [] getRegionsName() {
-  //   return this.name;
-  // }
-  // /**
-  //  * @return the number of files in every store
-  //  */
-  // public int [] getStoresSize() {
-  //   return this.storeSize;
-  // }
-  //
-  // //Writable, used for debugging purposes only
-  // public void readFields(final DataInput in)
-  // throws IOException {
-  //   this.name = Bytes.readByteArray(in);
-  //   this.numStores = in.readInt();
-  //   this.storeSize = new int [numStores];
-  //   for(int i=0; i<this.numStores; i++) {
-  //     this.storeSize[i] = in.readInt();
-  //   }
-  // }
-  //
-  // public void write(final DataOutput out)
-  // throws IOException {
-  //   Bytes.writeByteArray(out, this.regionInfo.getRegionName());
-  //   out.writeInt(this.stores.size());
-  //   for(Store store : this.stores.values()) {
-  //     out.writeInt(store.getNumberOfstorefiles());
-  //   }
-  // }
-
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      (3 * Bytes.SIZEOF_LONG) + (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
+      (21 * ClassSize.REFERENCE) + ClassSize.OBJECT);
+
+  public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.OBJECT + (2 * ClassSize.ATOMIC_BOOLEAN) +
+      ClassSize.ATOMIC_LONG + ClassSize.ATOMIC_INTEGER +
+      ClassSize.CONCURRENT_HASHMAP +
+      (16 * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
+      (16 * ClassSize.CONCURRENT_HASHMAP_SEGMENT) +
+      ClassSize.CONCURRENT_SKIPLISTMAP + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY +
+      RegionHistorian.FIXED_OVERHEAD + HLog.FIXED_OVERHEAD +
+      ClassSize.align(ClassSize.OBJECT + (5 * Bytes.SIZEOF_BOOLEAN)) +
+      (3 * ClassSize.REENTRANT_LOCK));
+
+  @Override
+  public long heapSize() {
+    long heapSize = DEEP_OVERHEAD;
+    for(Store store : this.stores.values()) {
+      heapSize += store.heapSize();
+    }
+    return heapSize;
+  }
 }
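HRegion.heapSize() above illustrates the aggregation idiom: a container charges its own precomputed DEEP_OVERHEAD plus each child's reported usage. A self-contained sketch of the same idiom, with hypothetical classes and made-up constants:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-ins for HeapSize implementors; not HBase classes.
interface Sized { long heapSize(); }

class Child implements Sized {
  public long heapSize() { return 128; } // pretend fixed cost per child
}

class Container implements Sized {
  static final long DEEP_OVERHEAD = 256; // pretend precomputed constant
  private final Map<String, Child> children = new ConcurrentHashMap<>();

  void add(String name) { children.put(name, new Child()); }

  // Same shape as HRegion.heapSize(): own overhead plus each child's usage.
  public long heapSize() {
    long heapSize = DEEP_OVERHEAD;
    for (Child c : children.values()) {
      heapSize += c.heapSize();
    }
    return heapSize;
  }
}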
MemStore.java
@@ -30,14 +30,17 @@ import java.util.List;
 import java.util.NavigableSet;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.regionserver.DeleteCompare.DeleteCode;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 /**
  * The MemStore holds in-memory modifications to the Store. Modifications
|
||||||
* TODO: With new KVSLS, need to make sure we update HeapSize with difference
|
* TODO: With new KVSLS, need to make sure we update HeapSize with difference
|
||||||
* in KV size.
|
* in KV size.
|
||||||
*/
|
*/
|
||||||
class MemStore {
|
public class MemStore implements HeapSize {
|
||||||
private static final Log LOG = LogFactory.getLog(MemStore.class);
|
private static final Log LOG = LogFactory.getLog(MemStore.class);
|
||||||
|
|
||||||
private final long ttl;
|
private final long ttl;
|
||||||
|
@@ -75,8 +78,8 @@ class MemStore {
   // Used comparing versions -- same r/c and type but different timestamp.
   final KeyValue.KVComparator comparatorIgnoreTimestamp;
 
-  // TODO: Fix this guess by studying jprofiler
-  private final static int ESTIMATED_KV_HEAP_TAX = 60;
+  // Used to track own heapSize
+  final AtomicLong size;
 
   /**
    * Default constructor. Used for tests.
@@ -98,6 +101,7 @@ class MemStore {
     this.comparatorIgnoreType = this.comparator.getComparatorIgnoringType();
     this.kvset = new KeyValueSkipListSet(c);
     this.snapshot = new KeyValueSkipListSet(c);
+    this.size = new AtomicLong(DEEP_OVERHEAD);
   }
 
   void dump() {
@@ -129,6 +133,8 @@ class MemStore {
       if (!this.kvset.isEmpty()) {
         this.snapshot = this.kvset;
         this.kvset = new KeyValueSkipListSet(this.comparator);
+        // Reset heap to not include any keys
+        this.size.set(DEEP_OVERHEAD);
       }
     }
   } finally {
@@ -181,7 +187,8 @@ class MemStore {
     long size = -1;
     this.lock.readLock().lock();
     try {
-      size = heapSize(kv, this.kvset.add(kv));
+      size = heapSizeChange(kv, this.kvset.add(kv));
+      this.size.addAndGet(size);
     } finally {
       this.lock.readLock().unlock();
     }
@@ -254,33 +261,19 @@ class MemStore {
       //Delete all the entries effected by the last added delete
       for (KeyValue kv : deletes) {
         notpresent = this.kvset.remove(kv);
-        size -= heapSize(kv, notpresent);
+        size -= heapSizeChange(kv, notpresent);
       }
 
       // Adding the delete to memstore. Add any value, as long as
       // same instance each time.
-      size += heapSize(delete, this.kvset.add(delete));
+      size += heapSizeChange(delete, this.kvset.add(delete));
     } finally {
       this.lock.readLock().unlock();
     }
+    this.size.addAndGet(size);
     return size;
   }
 
-  /*
-   * Calculate how the memstore size has changed, approximately.  Be careful.
-   * If class changes, be sure to change the size calculation.
-   * Add in tax of Map.Entry.
-   * @param kv
-   * @param notpresent True if the kv was NOT present in the set.
-   * @return Size
-   */
-  long heapSize(final KeyValue kv, final boolean notpresent) {
-    return notpresent?
-      // Add overhead for value byte array and for Map.Entry -- 57 bytes
-      // on x64 according to jprofiler.
-      ESTIMATED_KV_HEAP_TAX + 57 + kv.getLength(): 0; // Guess no change in size.
-  }
-
   /**
    * @param kv Find the row that comes after this one.  If null, we return the
    * first.
@@ -694,6 +687,42 @@ class MemStore {
       }
     }
   }
 
+  public final static long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + Bytes.SIZEOF_LONG + (7 * ClassSize.REFERENCE));
+
+  public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.REENTRANT_LOCK + ClassSize.ATOMIC_LONG +
+      (2 * ClassSize.CONCURRENT_SKIPLISTMAP));
+
+  /*
+   * Calculate how the MemStore size has changed.  Includes overhead of the
+   * backing Map.
+   * @param kv
+   * @param notpresent True if the kv was NOT present in the set.
+   * @return Size
+   */
+  long heapSizeChange(final KeyValue kv, final boolean notpresent) {
+    return notpresent ?
+        ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()):
+        0;
+  }
+
+  /**
+   * Get the entire heap usage for this MemStore not including keys in the
+   * snapshot.
+   */
+  @Override
+  public long heapSize() {
+    return size.get();
+  }
+
+  /**
+   * Get the heap usage of KVs in this MemStore.
+   */
+  public long keySize() {
+    return heapSize() - DEEP_OVERHEAD;
+  }
+
   /**
    * Code to help figure if our approximation of object heap sizes is close
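The key shift in MemStore is from a guessed per-KV tax to exact delta accounting: an AtomicLong seeded with DEEP_OVERHEAD, adjusted only when an insert or remove actually changes the backing map. A stand-alone sketch of that accounting pattern — names and constants here are hypothetical, not the HBase values:

import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;

class SizeTrackedSet {
  static final long BASE_OVERHEAD = 400;      // pretend DEEP_OVERHEAD
  static final long MAP_ENTRY_OVERHEAD = 56;  // pretend per-entry map cost

  private final ConcurrentSkipListMap<String, String> map =
      new ConcurrentSkipListMap<>();
  private final AtomicLong size = new AtomicLong(BASE_OVERHEAD);

  long add(String key, String value) {
    boolean added = map.put(key, value) == null;
    // Only charge the heap when the entry is genuinely new, as
    // heapSizeChange() does above for a KeyValue.
    long delta = added
        ? MAP_ENTRY_OVERHEAD + 2L * (key.length() + value.length())
        : 0;
    size.addAndGet(delta);
    return delta;
  }

  long heapSize() { return size.get(); }
  long keySize() { return heapSize() - BASE_OVERHEAD; }
}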
Store.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.SequenceFile;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
@@ -83,7 +85,7 @@ import org.apache.hadoop.util.StringUtils;
  * <p>Locking and transactions are handled at a higher level.  This API should
  * not be called directly but by an HRegion manager.
  */
-public class Store implements HConstants {
+public class Store implements HConstants, HeapSize {
   static final Log LOG = LogFactory.getLog(Store.class);
   /**
    * Comparator that looks at columns and compares their family portions.
@@ -510,7 +512,7 @@ public class Store implements HConstants {
         if (!isExpired(kv, oldestTimestamp)) {
           writer.append(kv);
           entries++;
-          flushed += this.memstore.heapSize(kv, true);
+          flushed += this.memstore.heapSizeChange(kv, true);
         }
       }
       // B. Write out the log sequence number that corresponds to this output
@@ -1627,4 +1629,19 @@ public class Store implements HConstants {
         Bytes.toBytes(newValue));
     return new ICVResult(newValue, newKv.heapSize(), newKv);
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
+      (5 * Bytes.SIZEOF_LONG) + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
+      ClassSize.align(ClassSize.ARRAY));
+
+  public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +
+      ClassSize.CONCURRENT_SKIPLISTMAP +
+      ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT);
+
+  @Override
+  public long heapSize() {
+    return DEEP_OVERHEAD + this.memstore.heapSize();
+  }
 }
ClassSize.java
@@ -73,7 +73,25 @@ public class ClassSize {
 
   /** Overhead for ConcurrentHashMap.Segment */
   public static int CONCURRENT_HASHMAP_SEGMENT = 0;
 
+  /** Overhead for ConcurrentSkipListMap */
+  public static int CONCURRENT_SKIPLISTMAP = 0;
+
+  /** Overhead for ConcurrentSkipListMap Entry */
+  public static int CONCURRENT_SKIPLISTMAP_ENTRY = 0;
+
+  /** Overhead for ReentrantReadWriteLock */
+  public static int REENTRANT_LOCK = 0;
+
+  /** Overhead for AtomicLong */
+  public static int ATOMIC_LONG = 0;
+
+  /** Overhead for AtomicInteger */
+  public static int ATOMIC_INTEGER = 0;
+
+  /** Overhead for AtomicBoolean */
+  public static int ATOMIC_BOOLEAN = 0;
+
   private static final String THIRTY_TWO = "32";
 
   /**
@@ -118,6 +136,21 @@ public class ClassSize {
 
     CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT +
         (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_FLOAT + ARRAY);
+
+    CONCURRENT_SKIPLISTMAP = align(Bytes.SIZEOF_INT + OBJECT + (8 * REFERENCE));
+
+    CONCURRENT_SKIPLISTMAP_ENTRY = align(
+        align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
+        align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */
+
+    REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));
+
+    ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG);
+
+    ATOMIC_INTEGER = align(OBJECT + Bytes.SIZEOF_INT);
+
+    ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN);
+
   }
 
   /**
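The CONCURRENT_SKIPLISTMAP_ENTRY formula above encodes a structural observation: each entry owns one node object, and on average every second entry owns an index object. A worked example under assumed 64-bit sizes (the real numbers are computed at runtime by ClassSize):

public class SkipListEntryEstimate {
  // Assumed 64-bit JVM sizes; ClassSize derives the actual values.
  static final int OBJECT = 16;
  static final int REFERENCE = 8;

  static long align(long num) { return ((num + 7) >> 3) << 3; }

  public static void main(String[] args) {
    long node = align(OBJECT + 3 * REFERENCE);        // 40: one node per entry
    long index = align((OBJECT + 3 * REFERENCE) / 2); // 24: one index per two entries
    System.out.println(align(node + index));          // 64 bytes per entry, estimated
  }
}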
TestHeapSize.java
@@ -5,13 +5,23 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.RegionHistorian;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.MemStore;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 
@@ -99,6 +109,60 @@ public class TestHeapSize extends TestCase {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
 
+    // ConcurrentHashMap
+    cl = ConcurrentHashMap.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.CONCURRENT_HASHMAP;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // ConcurrentSkipListMap
+    cl = ConcurrentSkipListMap.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.CONCURRENT_SKIPLISTMAP;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // ReentrantReadWriteLock
+    cl = ReentrantReadWriteLock.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.REENTRANT_LOCK;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicLong
+    cl = AtomicLong.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_LONG;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicInteger
+    cl = AtomicInteger.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_INTEGER;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicBoolean
+    cl = AtomicBoolean.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_BOOLEAN;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
   }
 
@@ -124,18 +188,21 @@ public class TestHeapSize extends TestCase {
       assertEquals(expected, actual);
     }
 
-    //LruBlockCache Overhead
-    cl = LruBlockCache.class;
-    actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
+    //Put
+    cl = Put.class;
     expected = ClassSize.estimateBase(cl, false);
+    //The actual TreeMap is not included in the above calculation
+    expected += ClassSize.TREEMAP;
+    Put put = new Put(Bytes.toBytes(""));
+    actual = put.heapSize();
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
 
-    // LruBlockCache Map Fixed Overhead
-    cl = ConcurrentHashMap.class;
-    actual = ClassSize.CONCURRENT_HASHMAP;
+    //LruBlockCache Overhead
+    cl = LruBlockCache.class;
+    actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
@@ -157,17 +224,64 @@ public class TestHeapSize extends TestCase {
       assertEquals(expected, actual);
     }
 
-    //Put
-    cl = Put.class;
+    // MemStore Overhead
+    cl = MemStore.class;
+    actual = MemStore.FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
-    //The actual TreeMap is not included in the above calculation
-    expected += ClassSize.TREEMAP;
-    Put put = new Put(Bytes.toBytes(""));
-    actual = put.heapSize();
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
+
+    // MemStore Deep Overhead
+    actual = MemStore.DEEP_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    expected += ClassSize.estimateBase(ReentrantReadWriteLock.class, false);
+    expected += ClassSize.estimateBase(AtomicLong.class, false);
+    expected += ClassSize.estimateBase(ConcurrentSkipListMap.class, false);
+    expected += ClassSize.estimateBase(ConcurrentSkipListMap.class, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      ClassSize.estimateBase(ReentrantReadWriteLock.class, true);
+      ClassSize.estimateBase(AtomicLong.class, true);
+      ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
+      assertEquals(expected, actual);
+    }
+
+    // Store Overhead
+    cl = Store.class;
+    actual = Store.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // Region Overhead
+    cl = HRegion.class;
+    actual = HRegion.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // RegionHistorian Overhead
+    cl = RegionHistorian.class;
+    actual = RegionHistorian.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // Currently NOT testing Deep Overheads of many of these classes.
+    // Deep overheads cover a vast majority of stuff, but will not be 100%
+    // accurate because it's unclear when we're referencing stuff that's already
+    // accounted for.  But we have satisfied our two core requirements.
+    // Sizing is quite accurate now, and our tests will throw errors if
+    // any of these classes are modified without updating overhead sizes.
+
   }
 
 }
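The test pattern repeated above compares each hand-maintained constant against a reflection-based estimate, so any field added to one of these classes breaks the build until its overhead constant is updated. A simplified sketch of how such an estimate can be derived — an assumption-laden stand-in, not ClassSize.estimateBase's actual logic:

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public class EstimateSketch {
  static long align(long num) { return ((num + 7) >> 3) << 3; }

  // Sums assumed sizes of a class's declared instance fields plus an
  // assumed 16-byte object header, then aligns to 8 bytes.
  static long estimateBase(Class<?> cl) {
    long size = 16; // assumed object header on a 64-bit JVM
    for (Field f : cl.getDeclaredFields()) {
      if (Modifier.isStatic(f.getModifiers())) continue;
      Class<?> t = f.getType();
      if (t == long.class || t == double.class) size += 8;
      else if (t == int.class || t == float.class) size += 4;
      else if (t == short.class || t == char.class) size += 2;
      else if (t == boolean.class || t == byte.class) size += 1;
      else size += 8; // assumed reference size
    }
    return align(size);
  }
}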