From 8732e8d44fd82d96e74aa45b3622fde3ba211ad3 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Wed, 24 Jun 2009 20:03:26 +0000
Subject: [PATCH] HBASE-1562 How to handle the setting of 32 bit versus 64 bit machines

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@788164 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                        |   2 +
 .../org/apache/hadoop/hbase/KeyValue.java          |   5 +-
 .../org/apache/hadoop/hbase/client/Put.java        |  37 +++--
 .../org/apache/hadoop/hbase/io/HeapSize.java       |  32 -----
 .../apache/hadoop/hbase/io/hfile/HFile.java        |  23 ++--
 .../hadoop/hbase/io/hfile/LruBlockCache.java       |  28 ++--
 .../hadoop/hbase/regionserver/HLogKey.java         |   3 +-
 .../hadoop/hbase/regionserver/LruHashMap.java      |  20 ++-
 .../apache/hadoop/hbase/util/ClassSize.java        | 129 +++++++++++-------
 .../apache/hadoop/hbase/io/TestHeapSize.java       |  27 ++--
 .../hadoop/hbase/io/hfile/TestHFile.java           |  20 ++-
 11 files changed, 171 insertions(+), 155 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index e957a8fccdc..d36d1c99817 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -399,6 +399,8 @@ Release 0.20.0 - Unreleased
                ConcurrentSkipListSet
    HBASE-1578  Change the name of the in-memory updates from 'memcache' to
                'memtable' or....
+   HBASE-1562  How to handle the setting of 32 bit versus 64 bit machines
+               (Erik Holstad via Stack)
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

diff --git a/src/java/org/apache/hadoop/hbase/KeyValue.java b/src/java/org/apache/hadoop/hbase/KeyValue.java
index 67ff09183ab..9151b7f5c86 100644
--- a/src/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/src/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1784,8 +1784,9 @@ public class KeyValue implements Writable, HeapSize {
 
   // HeapSize
   public long heapSize() {
-    return ClassSize.alignSize(HeapSize.OBJECT + HeapSize.REFERENCE +
-        HeapSize.BYTE_ARRAY + length + (2 * Bytes.SIZEOF_INT));
+    return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE +
+        ClassSize.align(ClassSize.ARRAY + length) +
+        (2 * Bytes.SIZEOF_INT));
   }
 
   // Writable
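
The new KeyValue.heapSize() arithmetic above is easier to follow with concrete numbers. The following standalone sketch is illustrative only (not part of the patch); it inlines the values ClassSize computes on a 64-bit JVM, namely REFERENCE = 8, OBJECT = 16, ARRAY = 24:

    public class KeyValueHeapSizeDemo {
      static final int REFERENCE = 8;          // 64-bit JVM
      static final int OBJECT = 2 * REFERENCE; // 16
      static final int ARRAY = 3 * REFERENCE;  // 24
      static final int SIZEOF_INT = 4;         // Bytes.SIZEOF_INT

      // Round up to the nearest multiple of 8, same as ClassSize.align.
      static long align(long num) {
        return ((num + 7) >> 3) << 3;
      }

      public static void main(String[] args) {
        int length = 37; // length of the KeyValue's backing byte []
        // Object header, the reference to the array, the aligned array
        // itself, and the two int fields (offset and length).
        long heapSize = align(OBJECT + REFERENCE + align(ARRAY + length) +
            2 * SIZEOF_INT);
        System.out.println(heapSize); // prints 96
      }
    }
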
diff --git a/src/java/org/apache/hadoop/hbase/client/Put.java b/src/java/org/apache/hadoop/hbase/client/Put.java
index e7666692e68..caa57d8ef70 100644
--- a/src/java/org/apache/hadoop/hbase/client/Put.java
+++ b/src/java/org/apache/hadoop/hbase/client/Put.java
@@ -52,9 +52,10 @@ public class Put implements HeapSize, Writable, Comparable<Put> {
   private Map<byte [], List<KeyValue>> familyMap =
     new TreeMap<byte [], List<KeyValue>>(Bytes.BYTES_COMPARATOR);
 
-  private static final long OVERHEAD = ClassSize.alignSize(HeapSize.OBJECT +
-      1 * HeapSize.REFERENCE + 1 * HeapSize.ARRAY + 2 * Bytes.SIZEOF_LONG +
-      1 * Bytes.SIZEOF_BOOLEAN + 1 * HeapSize.REFERENCE + HeapSize.TREEMAP_SIZE);
+  private static final long OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + ClassSize.REFERENCE +
+      2 * Bytes.SIZEOF_LONG + Bytes.SIZEOF_BOOLEAN +
+      ClassSize.REFERENCE + ClassSize.TREEMAP);
 
   /** Constructor for Writable.  DO NOT USE */
   public Put() {}
@@ -201,10 +202,16 @@ public class Put implements HeapSize, Writable, Comparable<Put> {
     this.timestamp = timestamp;
   }
 
+  /**
+   * @return the number of different families included in this put
+   */
   public int numFamilies() {
     return familyMap.size();
   }
-  
+
+  /**
+   * @return the total number of KeyValues that will be added with this put
+   */
   public int size() {
     int size = 0;
     for(List<KeyValue> kvList : this.familyMap.values()) {
@@ -270,28 +277,30 @@ public class Put implements HeapSize, Writable, Comparable<Put> {
   //HeapSize
   public long heapSize() {
     long heapsize = OVERHEAD;
-    heapsize += ClassSize.alignSize(this.row.length);
-
+    //Adding row
+    heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length);
+    //Adding map overhead
+    heapsize +=
+      ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY);
     for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
-      //Adding entry overhead
-      heapsize += HeapSize.MAP_ENTRY_SIZE;
-      //Adding key overhead
-      heapsize += HeapSize.REFERENCE + HeapSize.ARRAY +
-        ClassSize.alignSize(entry.getKey().length);
+      //Adding key overhead
+      heapsize +=
+        ClassSize.align(ClassSize.ARRAY + entry.getKey().length);
 
       //This part is kind of tricky since the JVM can reuse references if you
       //store the same value, but have a good match with SizeOf at the moment
       //Adding value overhead
-      heapsize += HeapSize.REFERENCE + HeapSize.ARRAYLIST_SIZE;
+      heapsize += ClassSize.align(ClassSize.ARRAYLIST);
       int size = entry.getValue().size();
-      heapsize += size * HeapSize.REFERENCE;
+      heapsize += ClassSize.align(ClassSize.ARRAY +
+        size * ClassSize.REFERENCE);
+
       for(KeyValue kv : entry.getValue()) {
         heapsize += kv.heapSize();
       }
     }
-    return heapsize;
+    return ClassSize.align(heapsize);
   }
 
   //Writable

diff --git a/src/java/org/apache/hadoop/hbase/io/HeapSize.java b/src/java/org/apache/hadoop/hbase/io/HeapSize.java
index f2150486fa6..d7b737c848d 100644
--- a/src/java/org/apache/hadoop/hbase/io/HeapSize.java
+++ b/src/java/org/apache/hadoop/hbase/io/HeapSize.java
@@ -38,38 +38,6 @@ package org.apache.hadoop.hbase.io;
  *
  */
 public interface HeapSize {
-
-  /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */
-  static final int REFERENCE = 8;
-
-  /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */
-  static final int OBJECT = 2 * REFERENCE;
-
-  /** Array overhead */
-  static final int ARRAY = 3 * REFERENCE;
-
-  /** OverHead for nested arrays */
-  static final int MULTI_ARRAY = (4 * REFERENCE) + ARRAY;
-
-  /** Byte arrays are fixed size below plus its length, 8 byte aligned */
-  static final int BYTE_ARRAY = 3 * REFERENCE;
-
-  /** Overhead for ByteBuffer */
-  static final int BYTE_BUFFER = 56;
-
-  /** String overhead */
-  static final int STRING_SIZE = 64;
-
-  /** Overhead for ArrayList(0) */
-  static final int ARRAYLIST_SIZE = 64;
-
-  /** Overhead for TreeMap */
-  static final int TREEMAP_SIZE = 80;
-
-  /** Overhead for entry in map */
-  static final int MAP_ENTRY_SIZE = 64;
-
   /**
    * @return Approximate 'exclusive deep size' of implementing object. Includes
    * count of payload and hosting object sizings.
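
With the sizing constants gone from the HeapSize interface, an implementor now reads them from ClassSize instead. A minimal sketch of what an implementation looks like after this patch; the FixedSizeThing class is hypothetical and only assumes the ClassSize members the patch introduces:

    import org.apache.hadoop.hbase.io.HeapSize;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    // Hypothetical implementor: one reference field plus one long field.
    public class FixedSizeThing implements HeapSize {
      private Object payload;
      private long id;

      public long heapSize() {
        // Object header, one reference, one long, rounded to an
        // 8-byte boundary.
        return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE +
            Bytes.SIZEOF_LONG);
      }
    }
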
diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 58b04461152..383fbaaae4a 100644
--- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -1339,10 +1339,6 @@ public class HFile {
      */
     final RawComparator<byte []> comparator;
 
-    static final int OVERHEAD = (int)ClassSize.alignSize(HeapSize.OBJECT +
-      2 * Bytes.SIZEOF_INT + 1 * HeapSize.MULTI_ARRAY + 2 * HeapSize.ARRAY +
-      4 * HeapSize.REFERENCE);
-
     /*
      * Shutdown default constructor
      */
@@ -1498,23 +1494,28 @@ public class HFile {
     }
 
     public long heapSize() {
-      long size = OVERHEAD;
-
+      long heapsize = ClassSize.align(ClassSize.OBJECT +
+        2 * Bytes.SIZEOF_INT + (3 + 1) * ClassSize.REFERENCE);
       //Calculating the size of blockKeys
       if(blockKeys != null) {
+        //Adding array + references overhead
+        heapsize += ClassSize.align(ClassSize.ARRAY +
+          blockKeys.length * ClassSize.REFERENCE);
+        //Adding bytes
         for(byte [] bs : blockKeys) {
-          size += HeapSize.MULTI_ARRAY;
-          size += ClassSize.alignSize(bs.length);
+          heapsize += ClassSize.align(ClassSize.ARRAY + bs.length);
         }
       }
       if(blockOffsets != null) {
-        size += blockOffsets.length * Bytes.SIZEOF_LONG;
+        heapsize += ClassSize.align(ClassSize.ARRAY +
+          blockOffsets.length * Bytes.SIZEOF_LONG);
       }
       if(blockDataSizes != null) {
-        size += blockDataSizes.length * Bytes.SIZEOF_INT;
+        heapsize += ClassSize.align(ClassSize.ARRAY +
+          blockDataSizes.length * Bytes.SIZEOF_INT);
       }
-      return size;
+      return ClassSize.align(heapsize);
     }
   }
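
The BlockIndex.heapSize() rewrite above charges each allocated array as align(ClassSize.ARRAY + payload) instead of the old flat MULTI_ARRAY constant. A self-contained rendering of that arithmetic for a hypothetical two-key index on a 64-bit JVM; the constants are inlined so the sketch runs without HBase on the classpath:

    public class BlockIndexCostSketch {
      static final int REFERENCE = 8, OBJECT = 16, ARRAY = 24; // 64-bit
      static final int SIZEOF_INT = 4, SIZEOF_LONG = 8;

      static long align(long n) { return ((n + 7) >> 3) << 3; }

      public static void main(String[] args) {
        int[] keyLengths = {10, 10}; // two block keys of 10 bytes each
        // Base fields: two ints plus (3 + 1) references, as in heapSize().
        long size = align(OBJECT + 2 * SIZEOF_INT + (3 + 1) * REFERENCE);
        size += align(ARRAY + keyLengths.length * REFERENCE); // blockKeys
        for (int len : keyLengths) {
          size += align(ARRAY + len);                         // each key
        }
        size += align(ARRAY + keyLengths.length * SIZEOF_LONG); // offsets
        size += align(ARRAY + keyLengths.length * SIZEOF_INT);  // data sizes
        System.out.println(align(size)); // prints 248
      }
    }
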
diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 3d0f4e0c947..dceef51e95e 100644
--- a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -90,9 +90,10 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
   private long missCount = 0;
 
   /** Memory overhead of this Object (for HeapSize) */
-  private static final int OVERHEAD = (int)ClassSize.alignSize(HeapSize.OBJECT +
-    1 * Bytes.SIZEOF_FLOAT + 2 * Bytes.SIZEOF_INT + 1 * HeapSize.ARRAY +
-    3 * HeapSize.REFERENCE + 4 * Bytes.SIZEOF_LONG);
+  private static final int OVERHEAD = ClassSize.align(
+    ClassSize.OBJECT + 1 * Bytes.SIZEOF_FLOAT + 2 * Bytes.SIZEOF_INT +
+    ClassSize.align(ClassSize.ARRAY) + 3 * ClassSize.REFERENCE +
+    4 * Bytes.SIZEOF_LONG);
 
   /**
    * Constructs a new, empty map with the specified initial capacity,
@@ -119,7 +120,7 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
     if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
       throw new IllegalArgumentException("Load factor must be > 0");
     }
-    if (maxMemUsage <= (OVERHEAD + initialCapacity * HeapSize.REFERENCE)) {
+    if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
       throw new IllegalArgumentException("Max memory usage too small to " +
       "support base overhead");
     }
@@ -300,7 +301,7 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
    * @return memory usage of map in bytes
    */
   public long heapSize() {
-    return (memTotal - memFree);
+    return ClassSize.align(memTotal - memFree);
   }
 
   //--------------------------------------------------------------------------
@@ -503,7 +504,7 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
    * @return baseline memory overhead of object in bytes
    */
   private long getMinimumUsage() {
-    return OVERHEAD + (entries.length * HeapSize.REFERENCE);
+    return OVERHEAD + (entries.length * ClassSize.REFERENCE);
   }
 
   //--------------------------------------------------------------------------
@@ -724,7 +725,7 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
     }
 
     // Determine how much additional space will be required to grow the array
-    long requiredSpace = (newCapacity - oldCapacity) * HeapSize.REFERENCE;
+    long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE;
 
     // Verify/enforce we have sufficient memory to grow
     checkAndFreeMemory(requiredSpace);
@@ -833,7 +834,6 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
    */
   private void init() {
     memFree -= OVERHEAD;
-    memFree -= (entries.length * HeapSize.REFERENCE);
   }
 
   //--------------------------------------------------------------------------
@@ -975,8 +975,9 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
     protected long heapSize;
 
     /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = HeapSize.OBJECT + 5 * HeapSize.REFERENCE +
-      1 * Bytes.SIZEOF_INT + 1 * Bytes.SIZEOF_LONG;
+    static final int OVERHEAD = ClassSize.OBJECT +
+      5 * ClassSize.REFERENCE + 1 * Bytes.SIZEOF_INT +
+      1 * Bytes.SIZEOF_LONG;
 
     /**
      * Create a new entry.
@@ -1139,8 +1140,8 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
    * @return size of String in bytes
    */
   private long heapSize(String s) {
-    return HeapSize.STRING_SIZE +
-      ClassSize.alignSize(s.length() * Bytes.SIZEOF_CHAR);
+    return ClassSize.STRING + ClassSize.align(ClassSize.ARRAY +
+      s.length() * Bytes.SIZEOF_CHAR);
   }
 
   /**
@@ -1148,7 +1149,8 @@ implements HeapSize, Map<String,ByteBuffer>, BlockCache {
    * @return size of ByteBuffer in bytes
    */
   private long heapSize(ByteBuffer b) {
-    return HeapSize.BYTE_BUFFER + ClassSize.alignSize(b.capacity());
+    return ClassSize.BYTE_BUFFER +
+      ClassSize.align(ClassSize.ARRAY + b.capacity());
   }
 }

diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java b/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
index 3d30a322fed..7b0c06fac6a 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
@@ -42,7 +43,7 @@ public class HLogKey implements WritableComparable<HLogKey>, HeapSize {
   private long logSeqNum;
   // Time at which this edit was written.
   private long writeTime;
-  private int HEAP_TAX = HeapSize.OBJECT + (2 * HeapSize.BYTE_ARRAY) +
+  private int HEAP_TAX = ClassSize.OBJECT + (2 * ClassSize.ARRAY) +
     (2 * Bytes.SIZEOF_LONG);
 
   /** Writable Constructor -- Do not use. */
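
The cache's heapSize(String) above now prices a key as ClassSize.STRING plus the aligned char array. As a rough check of the numbers: on a 64-bit JVM ClassSize.STRING works out to 40, so a ten-character key costs 40 + align(24 + 20) = 88 bytes. The throwaway class below only uses the ClassSize and Bytes members referenced in the patch:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    public class StringCostCheck {
      public static void main(String[] args) {
        String key = "0123456789"; // a ten-character block name
        // Same arithmetic as LruBlockCache.heapSize(String).
        long cost = ClassSize.STRING + ClassSize.align(ClassSize.ARRAY +
            key.length() * Bytes.SIZEOF_CHAR);
        System.out.println(cost); // 88 on a 64-bit JVM
      }
    }
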
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
index 40752858fbb..745a16e679b 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
@@ -28,13 +28,11 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-
-
 /**
  * The LruHashMap is a memory-aware HashMap with a configurable maximum
  * memory footprint.
@@ -67,8 +65,8 @@ implements HeapSize, Map<K,V> {
 
   /** Memory overhead of this Object (for HeapSize) */
   private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG +
-    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * HeapSize.REFERENCE +
-    1 * HeapSize.ARRAY;
+    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE +
+    1 * ClassSize.ARRAY;
 
   /** Load factor allowed (usually 75%) */
   private final float loadFactor;
@@ -119,7 +117,7 @@ implements HeapSize, Map<K,V> {
     if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
       throw new IllegalArgumentException("Load factor must be > 0");
     }
-    if (maxMemUsage <= (OVERHEAD + initialCapacity * HeapSize.REFERENCE)) {
+    if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
       throw new IllegalArgumentException("Max memory usage too small to " +
       "support base overhead");
     }
@@ -472,7 +470,7 @@ implements HeapSize, Map<K,V> {
    * @return baseline memory overhead of object in bytes
    */
   private long getMinimumUsage() {
-    return OVERHEAD + (entries.length * HeapSize.REFERENCE);
+    return OVERHEAD + (entries.length * ClassSize.REFERENCE);
   }
 
   //--------------------------------------------------------------------------
@@ -693,7 +691,7 @@ implements HeapSize, Map<K,V> {
     }
 
     // Determine how much additional space will be required to grow the array
-    long requiredSpace = (newCapacity - oldCapacity) * HeapSize.REFERENCE;
+    long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE;
 
     // Verify/enforce we have sufficient memory to grow
     checkAndFreeMemory(requiredSpace);
@@ -802,7 +800,7 @@ implements HeapSize, Map<K,V> {
    */
   private void init() {
     memFree -= OVERHEAD;
-    memFree -= (entries.length * HeapSize.REFERENCE);
+    memFree -= (entries.length * ClassSize.REFERENCE);
   }
 
   //--------------------------------------------------------------------------
@@ -927,8 +925,8 @@ implements HeapSize, Map<K,V> {
   protected static class Entry<K extends HeapSize, V extends HeapSize> implements Map.Entry<K,V>, HeapSize {
     /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + 5 * HeapSize.REFERENCE +
-      2 * Bytes.SIZEOF_INT;
+    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG +
+      5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT;
 
     /** The key */
     protected final K key;

diff --git a/src/java/org/apache/hadoop/hbase/util/ClassSize.java b/src/java/org/apache/hadoop/hbase/util/ClassSize.java
index 4f964185e3f..31cdb2a8695 100755
--- a/src/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/src/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -22,10 +22,10 @@ package org.apache.hadoop.hbase.util;
 
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
+import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.io.HeapSize;
 
 /**
  * Class for determining the "size" of a class, an attempt to calculate the
@@ -36,34 +36,72 @@ import org.apache.hadoop.hbase.io.HeapSize;
 public class ClassSize {
   static final Log LOG = LogFactory.getLog(ClassSize.class);
 
-  private int refSize;
-  private int minObjectSize;
+  private static int nrOfRefsPerObj = 2;
+
+  /** Array overhead */
+  public static int ARRAY = 0;
+
+  /** Overhead for ArrayList(0) */
+  public static int ARRAYLIST = 0;
+
+  /** Overhead for ByteBuffer */
+  public static int BYTE_BUFFER = 0;
+
+  /** Overhead for an Integer */
+  public static int INTEGER = 0;
+
+  /** Overhead for entry in map */
+  public static int MAP_ENTRY = 0;
+
+  /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */
+  public static int OBJECT = 0;
+
+  /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */
+  public static int REFERENCE = 0;
+
+  /** String overhead */
+  public static int STRING = 0;
+
+  /** Overhead for TreeMap */
+  public static int TREEMAP = 0;
+
+  private static final String THIRTY_TWO = "32";
 
   /**
-   * Constructor
-   * @throws Exception
+   * Static block for reading the arch setting and setting the overheads
+   * according to 32-bit or 64-bit architecture.
    */
-  public ClassSize() throws Exception{
+  static {
     // Figure out whether this is a 32 or 64 bit machine.
-    Runtime runtime = Runtime.getRuntime();
-    int loops = 10;
-    int sz = 0;
-    for(int i = 0; i < loops; i++) {
-      cleaner(runtime, i);
-      long memBase = runtime.totalMemory() - runtime.freeMemory();
-      Object[] junk = new Object[10000];
-      cleaner(runtime, i);
-      long memUsed = runtime.totalMemory() - runtime.freeMemory() - memBase;
-      sz = (int)((memUsed + junk.length/2)/junk.length);
-      if(sz > 0 ) {
-        break;
-      }
+    Properties sysProps = System.getProperties();
+    String arcModel = sysProps.getProperty("sun.arch.data.model");
+
+    //Default value is set to 8, covering the case when arcModel is unknown
+    REFERENCE = 8;
+    //Comparing against the constant guards against arcModel being null
+    if (THIRTY_TWO.equals(arcModel)) {
+      REFERENCE = 4;
     }
-
-    refSize = ( 4 > sz) ? 4 : sz;
-    minObjectSize = 4*refSize;
+
+    //OBJECT and ARRAY must be set before the compound overheads below use them
+    OBJECT = 2 * REFERENCE;
+
+    ARRAY = 3 * REFERENCE;
+
+    ARRAYLIST = align(OBJECT + REFERENCE + Bytes.SIZEOF_INT +
+      align(Bytes.SIZEOF_INT));
+
+    BYTE_BUFFER = align(OBJECT + REFERENCE + Bytes.SIZEOF_INT +
+      3 * Bytes.SIZEOF_BOOLEAN + 4 * Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG);
+
+    INTEGER = align(OBJECT + Bytes.SIZEOF_INT);
+
+    MAP_ENTRY = align(OBJECT + 5 * REFERENCE + Bytes.SIZEOF_BOOLEAN);
+
+    TREEMAP = align(OBJECT + 2 * Bytes.SIZEOF_INT + (5+2) * REFERENCE +
+      ClassSize.align(OBJECT + Bytes.SIZEOF_INT));
+
+    STRING = align(OBJECT + REFERENCE + 3 * Bytes.SIZEOF_INT);
   }
-  
+
   /**
    * The estimate of the size of a class instance depends on whether the JVM
    * uses 32 or 64 bit addresses, that is it depends on the size of an object
   * reference.
@@ -78,10 +116,12 @@ public class ClassSize {
    * primitives, the second the number of arrays and the third the number of
    * references.
    */
-  private int [] getSizeCoefficients(Class cl, boolean debug) {
+  @SuppressWarnings("unchecked")
+  private static int [] getSizeCoefficients(Class cl, boolean debug) {
     int primitives = 0;
     int arrays = 0;
-    int references = HeapSize.OBJECT / HeapSize.REFERENCE;
+    //The number of references that a new object takes
+    int references = nrOfRefsPerObj;
 
     for( ; null != cl; cl = cl.getSuperclass()) {
       Field[] field = cl.getDeclaredFields();
@@ -91,8 +131,9 @@ public class ClassSize {
           Class fieldClass = field[i].getType();
           if( fieldClass.isArray()){
             arrays++;
+            references++;
           }
-          else if(! fieldClass.isPrimitive()){
+          else if(!fieldClass.isPrimitive()){
             references++;
           }
           else {// Is simple primitive
@@ -136,21 +177,21 @@ public class ClassSize {
    *
    * @return the size estimate, in bytes
    */
-  private long estimateBaseFromCoefficients(int [] coeff, boolean debug) {
-    int size = coeff[0] + (coeff[1]*4 + coeff[2])*refSize;
+  private static long estimateBaseFromCoefficients(int [] coeff, boolean debug) {
+    long size = coeff[0] + align(coeff[1]*ARRAY) + coeff[2]*REFERENCE;
 
     // Round up to a multiple of 8
-    size = (int)alignSize(size);
+    size = align(size);
     if(debug) {
       if (LOG.isDebugEnabled()) {
         // Write out region name as string and its encoded name.
LOG.debug("Primitives " + coeff[0] + ", arrays " + coeff[1] + - ", references(inlcuding " + HeapSize.OBJECT + - ", for object overhead) " + coeff[2] + ", refSize " + refSize + + ", references(inlcuding " + nrOfRefsPerObj + + ", for object overhead) " + coeff[2] + ", refSize " + REFERENCE + ", size " + size); } } - return (size < minObjectSize) ? minObjectSize : size; + return size; } /** @@ -162,33 +203,29 @@ public class ClassSize { * * @return the size estimate in bytes. */ - public long estimateBase(Class cl, boolean debug) { + @SuppressWarnings("unchecked") + public static long estimateBase(Class cl, boolean debug) { return estimateBaseFromCoefficients( getSizeCoefficients(cl, debug), debug); } /** - * Tries to clear all the memory used to estimate the reference size for the - * current JVM - * @param runtime - * @param i - * @throws Exception + * Aligns a number to 8. + * @param num number to align to 8 + * @return smallest number >= input that is a multiple of 8 */ - private void cleaner(Runtime runtime, int i) throws Exception{ - Thread.sleep(i*1000); - runtime.gc();runtime.gc(); runtime.gc();runtime.gc();runtime.gc(); - runtime.runFinalization(); + public static int align(int num) { + return (int)(align((long)num)); } - /** * Aligns a number to 8. * @param num number to align to 8 * @return smallest number >= input that is a multiple of 8 */ - public static long alignSize(int num) { - int aligned = (num + 7)/8; - aligned *= 8; - return aligned; + public static long align(long num) { + //The 7 comes from that the alignSize is 8 which is the number of bytes + //stored and sent together + return ((num + 7) >> 3) << 3; } } diff --git a/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java b/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java index d74fcbbb53a..10bc465dae9 100644 --- a/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -1,5 +1,7 @@ package org.apache.hadoop.hbase.io; +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.KeyValue; @@ -24,47 +26,46 @@ public class TestHeapSize extends TestCase { * Testing the classes that implements HeapSize and are a part of 0.20. 
diff --git a/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java b/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
index d74fcbbb53a..10bc465dae9 100644
--- a/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -1,5 +1,7 @@
 package org.apache.hadoop.hbase.io;
 
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
@@ -24,47 +26,44 @@ public class TestHeapSize extends TestCase {
    * Testing the classes that implement HeapSize and are a part of 0.20.
    * Some are not tested here, for example BlockIndex, which is tested in
    * TestHFile since it is a non-public class
+   * @throws IOException
    */
-  public void testSizes() {
-    ClassSize cs = null;
+  @SuppressWarnings("unchecked")
+  public void testSizes() throws IOException {
     Class cl = null;
     long expected = 0L;
     long actual = 0L;
-    try {
-      cs = new ClassSize();
-    } catch(Exception e) {}
 
     //KeyValue
     cl = KeyValue.class;
-    expected = cs.estimateBase(cl, false);
+    expected = ClassSize.estimateBase(cl, false);
+
     KeyValue kv = new KeyValue();
     actual = kv.heapSize();
     if(expected != actual) {
-      cs.estimateBase(cl, true);
+      ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
 
     //LruBlockCache
     cl = LruBlockCache.class;
-    expected = cs.estimateBase(cl, false);
+    expected = ClassSize.estimateBase(cl, false);
     LruBlockCache c = new LruBlockCache(1,1,200);
-    //Since minimum size for the for a LruBlockCache is 1
-    //we need to remove one reference from the heapsize
-    actual = c.heapSize() - HeapSize.REFERENCE;
+    actual = c.heapSize();
     if(expected != actual) {
-      cs.estimateBase(cl, true);
+      ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
 
     //Put
     cl = Put.class;
-    expected = cs.estimateBase(cl, false);
+    expected = ClassSize.estimateBase(cl, false);
     //The actual TreeMap is not included in the above calculation
-    expected += HeapSize.TREEMAP_SIZE;
+    expected += ClassSize.TREEMAP;
     Put put = new Put(Bytes.toBytes(""));
     actual = put.heapSize();
     if(expected != actual) {
-      cs.estimateBase(cl, true);
+      ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
   }

diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index b66bb08b879..bdac1e096ae 100644
--- a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.hfile.HFile.BlockIndex;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
@@ -251,25 +250,22 @@ public class TestHFile extends HBaseTestCase {
   /**
    * Checks if the HeapSize calculator is within reason
   */
-  public void testHeapSizeForBlockIndex() {
-    ClassSize cs = null;
+  @SuppressWarnings("unchecked")
+  public void testHeapSizeForBlockIndex() throws IOException {
     Class cl = null;
     long expected = 0L;
     long actual = 0L;
-    try {
-      cs = new ClassSize();
-    } catch(Exception e) {}
-
-    //KeyValue
     cl = BlockIndex.class;
-    expected = cs.estimateBase(cl, false);
+    expected = ClassSize.estimateBase(cl, false);
     BlockIndex bi = new BlockIndex(Bytes.BYTES_RAWCOMPARATOR);
     actual = bi.heapSize();
-    //Since we have a [[]] in BlockIndex and the checker only sees the [] we
-    // miss a MULTI_ARRAY which is 4*Reference = 32 B
-    actual -= 32;
+    //Since the arrays in BlockIndex (byte [][] blockKeys, long [] blockOffsets,
+    //int [] blockDataSizes) are all null, they are not going to show up in the
+    //HeapSize calculation, so we need to remove those array costs from expected.
+    expected -= ClassSize.align(3 * ClassSize.ARRAY);
     if(expected != actual) {
-      cs.estimateBase(cl, true);
+      ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
   }
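
For completeness, the architecture probe the new static block relies on can be exercised directly. sun.arch.data.model is a Sun/Oracle-specific system property and may be absent on other JVMs, which is why the static block falls back to 64-bit sizes; the class below is illustrative only, not part of the patch:

    import org.apache.hadoop.hbase.util.ClassSize;

    public class ArchProbe {
      public static void main(String[] args) {
        // "32", "64", or null depending on the JVM.
        System.out.println("sun.arch.data.model = " +
            System.getProperty("sun.arch.data.model"));
        // Touching ClassSize has already run its static block.
        System.out.println("REFERENCE = " + ClassSize.REFERENCE); // 4 or 8
        System.out.println("OBJECT    = " + ClassSize.OBJECT);    // 8 or 16
        // The reflection-based estimate the tests compare against heapSize().
        System.out.println("estimateBase(ClassSize) = " +
            ClassSize.estimateBase(ClassSize.class, false));
      }
    }
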