diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh index 91aca76334f..e6d1c9c90ff 100644 --- a/conf/hbase-env.sh +++ b/conf/hbase-env.sh @@ -66,10 +66,12 @@ export HBASE_OPTS="-XX:+UseConcMarkSweepGC" # If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR . # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M" -# Uncomment below if you intend to use the EXPERIMENTAL off heap cache. -# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=" -# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value. - +# Uncomment below if you intend to use off heap cache. +# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=SET_THIS_TO_HOW_MANY_GIGS_OF_OFFHEAP" +# For example, to allocate 8G of offheap, set SET_THIS_TO_HOW_MANY_GIGS_OF_OFFHEAP to 8G as in: +# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=8G" +# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations +# needed when setting up off-heap block caching. # Uncomment and adjust to enable JMX exporting # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java index 2851a42d4e6..8699d324f9a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java @@ -55,7 +55,7 @@ public final class ByteBufferArray { this.bufferSize = (int) roundUp(capacity / 16, 32768); this.bufferCount = (int) (roundUp(capacity, bufferSize) / bufferSize); LOG.info("Allocating buffers total=" + StringUtils.byteDesc(capacity) - + " , sizePerBuffer=" + StringUtils.byteDesc(bufferSize) + ", count=" + + ", sizePerBuffer=" + StringUtils.byteDesc(bufferSize) + ", count=" + bufferCount + ", direct=" + directByteBuffer); buffers = new ByteBuffer[bufferCount + 1]; locks = new Lock[bufferCount + 1]; diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 71b2952ff0b..c61f347e27e 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -939,8 +939,10 @@ possible configurations would overwhelm and obscure the important. hbase.offheapcache.percentage 0 The percentage of the off heap space (-XX:MaxDirectMemorySize) to be - allocated towards the experimental off heap cache. If you desire the cache to be - disabled, simply set this value to 0. + allocated towards the experimental off heap "SlabCache" (This is different to + the BucketCache -- see the package javadoc for org.apache.hadoop.hbase.io.hfile + for more on your options). If you desire the cache to be disabled, simply set this + value to 0. 
hbase.data.umask.enable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 59fd2c06630..321685fd749 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.io.hfile.slab.SlabCache; import org.apache.hadoop.hbase.util.DirectMemoryUtils; import org.apache.hadoop.util.StringUtils; +import com.google.common.annotations.VisibleForTesting; + /** * Stores all of the cache objects and configuration for a single HFile. */ @@ -83,7 +85,7 @@ public class CacheConfig { * to the file that will host the file-based cache. See BucketCache#getIOEngineFromName() for * list of supported ioengine options. * - *

Set this option and a non-zero {@link BUCKET_CACHE_SIZE_KEY} to enable bucket cache. + *

Set this option and a non-zero {@link #BUCKET_CACHE_SIZE_KEY} to enable bucket cache. */ public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine"; @@ -91,10 +93,10 @@ public class CacheConfig { * When using bucket cache, this is a float that EITHER represents a percentage of total heap * memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache. * - *

The resultant size is further divided if {@link BUCKET_CACHE_COMBINED_KEY} is set (It is + *

The resultant size is further divided if {@link #BUCKET_CACHE_COMBINED_KEY} is set (It is * set by default. When false, bucket cache serves as an "L2" cache to the "L1" * {@link LruBlockCache}). The percentage is set in - * with {@link BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} float. + * with {@link #BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} float. */ public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size"; @@ -115,7 +117,7 @@ public class CacheConfig { /** * A float which designates how much of the overall cache to give to bucket cache - * and how much to on-heap lru cache when {@link BUCKET_CACHE_COMBINED_KEY} is set. + * and how much to on-heap lru cache when {@link #BUCKET_CACHE_COMBINED_KEY} is set. */ public static final String BUCKET_CACHE_COMBINED_PERCENTAGE_KEY = "hbase.bucketcache.percentage.in.combinedcache"; @@ -251,6 +253,7 @@ public class CacheConfig { this.cacheBloomsOnWrite = cacheBloomsOnWrite; this.evictOnClose = evictOnClose; this.cacheCompressed = cacheCompressed; + LOG.info(this); } /** @@ -369,13 +372,13 @@ public class CacheConfig { if (!isBlockCacheEnabled()) { return "CacheConfig:disabled"; } - return "CacheConfig:enabled " + - "[cacheDataOnRead=" + shouldCacheDataOnRead() + "] " + - "[cacheDataOnWrite=" + shouldCacheDataOnWrite() + "] " + - "[cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + "] " + - "[cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + "] " + - "[cacheEvictOnClose=" + shouldEvictOnClose() + "] " + - "[cacheCompressed=" + shouldCacheCompressed() + "]"; + return "blockCache=" + getBlockCache() + + ", cacheDataOnRead=" + shouldCacheDataOnRead() + + ", cacheDataOnWrite=" + shouldCacheDataOnWrite() + + ", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + + ", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + + ", cacheEvictOnClose=" + shouldEvictOnClose() + + ", cacheCompressed=" + shouldCacheCompressed(); } // Static block cache reference and methods @@ -384,7 +387,9 @@ public class CacheConfig { * 
Static reference to the block cache, or null if no caching should be used * at all. */ - private static BlockCache globalBlockCache; + // Clear this if in tests you'd make more than one block cache instance. + @VisibleForTesting + static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; /** Boolean whether we have disabled the block cache entirely. */ private static boolean blockCacheDisabled = false; @@ -396,7 +401,7 @@ public class CacheConfig { * @return The block cache or null. */ public static synchronized BlockCache instantiateBlockCache(Configuration conf) { - if (globalBlockCache != null) return globalBlockCache; + if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE; if (blockCacheDisabled) return null; float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, @@ -414,10 +419,10 @@ public class CacheConfig { MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); long lruCacheSize = (long) (mu.getMax() * cachePercentage); int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE); - long offHeapCacheSize = - (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0) * + long slabCacheOffHeapCacheSize = + (long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0) * DirectMemoryUtils.getDirectMemorySize()); - if (offHeapCacheSize <= 0) { + if (slabCacheOffHeapCacheSize <= 0) { String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null); float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F); // A percentage of max heap size or a absolute value with unit megabytes @@ -452,19 +457,19 @@ public class CacheConfig { throw new RuntimeException(ioex); } } - LOG.info("Allocating LruBlockCache with maximum size " + - StringUtils.humanReadableInt(lruCacheSize) + ", blockSize=" + blockSize); + LOG.info("Allocating LruBlockCache size=" + + StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); LruBlockCache 
lruCache = new LruBlockCache(lruCacheSize, blockSize); lruCache.setVictimCache(bucketCache); if (bucketCache != null && combinedWithLru) { - globalBlockCache = new CombinedBlockCache(lruCache, bucketCache); + GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache); } else { - globalBlockCache = lruCache; + GLOBAL_BLOCK_CACHE_INSTANCE = lruCache; } } else { - globalBlockCache = new DoubleBlockCache( - lruCacheSize, offHeapCacheSize, blockSize, blockSize, conf); + GLOBAL_BLOCK_CACHE_INSTANCE = new DoubleBlockCache( + lruCacheSize, slabCacheOffHeapCacheSize, blockSize, blockSize, conf); } - return globalBlockCache; + return GLOBAL_BLOCK_CACHE_INSTANCE; } -} +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 37221dff388..6f23d1648cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -31,8 +31,9 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; * CombinedBlockCache is an abstraction layer that combines * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used * to cache bloom blocks and index blocks. The larger bucketCache is used to - * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean) reads - * first from the smaller lruCache before looking for the block in the bucketCache. + * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean)} reads + * first from the smaller lruCache before looking for the block in the bucketCache. Blocks evicted + * from lruCache are put into the bucket cache. * Metrics are the combined size and hits and misses of both caches. 
* */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java index 0e32b4b0f24..274f847d4b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java @@ -63,15 +63,15 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize { long onHeapBlockSize, long offHeapBlockSize, Configuration conf) { LOG.info("Creating on-heap cache of size " - + StringUtils.humanReadableInt(onHeapSize) - + "bytes with an average block size of " - + StringUtils.humanReadableInt(onHeapBlockSize) + " bytes."); + + StringUtils.byteDesc(onHeapSize) + + " with an average block size of " + + StringUtils.byteDesc(onHeapBlockSize)); onHeapCache = new LruBlockCache(onHeapSize, onHeapBlockSize, conf); LOG.info("Creating off-heap cache of size " - + StringUtils.humanReadableInt(offHeapSize) - + "bytes with an average block size of " - + StringUtils.humanReadableInt(offHeapBlockSize) + " bytes."); + + StringUtils.byteDesc(offHeapSize) + + " with an average block size of " + + StringUtils.byteDesc(offHeapBlockSize)); offHeapCache = new SlabCache(offHeapSize, offHeapBlockSize); offHeapCache.addSlabByConf(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java index 1f449674a2e..be063f4584d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java @@ -16,28 +16,104 @@ * limitations under the License. */ /** - * Provides implementations of {@link HFile} and HFile + * Provides implementations of {@link org.apache.hadoop.hbase.io.hfile.HFile} and HFile * {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. 
Caches are configured (and instantiated) * by {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}. See head of the * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig} class for constants that define * cache options and configuration keys to use setting cache options. Cache implementations - * include the on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}, + * include the default, native on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}, * a {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} that can serve as an L2 for - * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}, and a - * {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} that has a bunch of deploy types - * including L2 for LRUBlockCache or using + * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} (hosted inside the class + * {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache} that caches blocks in BOTH L1 and L2, + * and on evict, moves from L1 to L2, etc), and a + * {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} that has a bunch of deploy formats + * including acting as a L2 for LruBlockCache -- when a block is evicted from LruBlockCache, it + * goes to the BucketCache and when we search a block, we look in both places -- or using * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}, as - * host for data blocks with meta blocks in the LRUBlockCache as well as onheap, offheap, and - * file options). + * a host for data blocks with meta blocks in the LRUBlockCache as well as onheap, offheap, and + * file options. * + *

Which BlockCache should I use?

+ * BucketCache has seen more production deploys and has more deploy options. Fetching will always + * be slower when fetching from BucketCache but latencies tend to be less erratic over time + * (roughly because GC is less). SlabCache tends to do more GCs as blocks are moved between L1 + * and L2 always, at least given the way {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache} + * currently works. It is tough doing an apples to apples compare since their hosting classes, + * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache} for BucketCache vs + * {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache} operate so differently. + * See Nick Dimiduk's + * BlockCache 101 for some numbers. See + * also the description of HBASE-7404 + * where Chunhui Shen lists issues he found with BlockCache (inefficent use of memory, doesn't + * help w/ GC). + * *

Enabling {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}

- * {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} has seen little use and will likely - * be deprecated in the near future. To enable it, - * set the float hbase.offheapcache.percentage to some value between 0 and 1. This + * {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} is the original offheap block cache + * but unfortunately has seen little use. It is originally described in + * Caching + * in Apache HBase: SlabCache. To enable it, + * set the float hbase.offheapcache.percentage + * ({@link CacheConfig#SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY}) to some value between 0 and 1 in + * your hbase-site.xml file. This * enables {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache}, a facade over * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} and - * {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}. The value set here will be - * multiplied by whatever the setting for -XX:MaxDirectMemorySize is and this is what + * {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}. DoubleBlockCache works as follows. + * When caching, it + * "...attempts to cache the block in both caches, while readblock reads first from the faster + * onheap cache before looking for the block in the off heap cache. Metrics are the + * combined size and hits and misses of both caches." The value set in + * hbase.offheapcache.percentage will be + * multiplied by whatever the setting for -XX:MaxDirectMemorySize is in + * your hbase-env.sh configuration file and this is what * will be used by {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} as its offheap store. + * Onheap store will be whatever the float {@link HConstants#HFILE_BLOCK_CACHE_SIZE_KEY} setting is + * (some value between 0 and 1) times the size of the allocated java heap. + * + *

Restart (or rolling restart) your cluster for the configs to take effect. Check logs to + * ensure your configurations came out as expected. + * + *

Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}

+ * Ensure the SlabCache config hbase.offheapcache.percentage is not set (or set to 0). + * At this point, it is probably best to read the code to learn the list of bucket cache options + * and how they combine (to be fixed). Read the options and defaults for BucketCache in the + * head of the {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}. + * + *

Here is a simple example of how to enable a 4G + * offheap bucket cache with 1G onheap cache. + * The onheap/offheap caches + * are managed by {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache} by default. For the + * CombinedBlockCache (from the class comment), "The smaller lruCache is used + * to cache bloom blocks and index blocks, the larger bucketCache is used to + * cache data blocks. getBlock reads first from the smaller lruCache before + * looking for the block in the bucketCache. Metrics are the combined size and + * hits and misses of both caches." To disable CombinedBlockCache and have the BucketCache act + * as a strict L2 cache to the L1 LruBlockCache (i.e. on eviction from L1, blocks go to L2), set + * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_COMBINED_KEY} to false. + * Also by default, unless you change it, + * {@link CacheConfig#BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} defaults to 0.9 (see + * the top of the CacheConfig in the BucketCache defaults section). This means that whatever + * size you set for the bucket cache with + * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_SIZE_KEY}, + * 90% will be used for offheap and 10% of the size will be used + * by the onheap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}. + *

Back to the example of setting an onheap cache of 1G and offheap of 4G, in + * hbase-env.sh ensure the java option -XX:MaxDirectMemorySize is + * enabled and 5G in size: e.g. -XX:MaxDirectMemorySize=5G. Then in + * hbase-site.xml add the following configurations: +

<property>
+  <name>hbase.bucketcache.ioengine</name>
+  <value>offheap</value>
+</property>
+<property>
+  <name>hbase.bucketcache.percentage.in.combinedcache</name>
+  <value>0.8</value>
+</property>
+<property>
+  <name>hbase.bucketcache.size</name>
+  <value>5120</value>
+</property>
. Above we set a cache of 5G, 80% of which will be offheap (4G) and 1G onheap. + * Restart (or rolling restart) your cluster for the configs to take effect. Check logs to ensure + * your configurations came out as expected. + * */ package org.apache.hadoop.hbase.io.hfile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java index 9b4621ec3ed..8397538c52e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java @@ -50,11 +50,12 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * SlabCache is composed of multiple SingleSizeCaches. It uses a TreeMap in * order to determine where a given element fits. Redirects gets and puts to the * correct SingleSizeCache. + * + *

It is configured with a call to {@link #addSlab(int, int)} * **/ @InterfaceAudience.Private public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize { - private final ConcurrentHashMap backingStore; private final TreeMap sizer; static final Log LOG = LogFactory.getLog(SlabCache.class); @@ -71,13 +72,26 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize { private static final long CACHE_FIXED_OVERHEAD = ClassSize.estimateBase( SlabCache.class, false); + /** + * Key used reading from configuration list of the percentage of our total space we allocate to + * the slabs. Defaults: "0.80", "0.20". + * @see #SLAB_CACHE_SIZES_KEY Must have corresponding number of elements. + */ + static final String SLAB_CACHE_PROPORTIONS_KEY = "hbase.offheapcache.slab.proportions"; + + /** + * Configuration key for list of the blocksize of the slabs in bytes. (E.g. the slab holds + * blocks of this size). Defaults: avgBlockSize * 11 / 10, avgBlockSize * 21 / 10 + * @see #SLAB_CACHE_PROPORTIONS_KEY + */ + static final String SLAB_CACHE_SIZES_KEY = "hbase.offheapcache.slab.sizes"; + /** * Default constructor, creates an empty SlabCache. * * @param size Total size allocated to the SlabCache. (Bytes) * @param avgBlockSize Average size of a block being cached. **/ - public SlabCache(long size, long avgBlockSize) { this.avgBlockSize = avgBlockSize; this.size = size; @@ -108,9 +122,8 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize { */ public void addSlabByConf(Configuration conf) { // Proportions we allocate to each slab of the total size. 
- String[] porportions = conf.getStrings( - "hbase.offheapcache.slab.proportions", "0.80", "0.20"); - String[] sizes = conf.getStrings("hbase.offheapcache.slab.sizes", + String[] porportions = conf.getStrings(SLAB_CACHE_PROPORTIONS_KEY, "0.80", "0.20"); + String[] sizes = conf.getStrings(SLAB_CACHE_SIZES_KEY, Long.valueOf(avgBlockSize * 11 / 10).toString(), Long.valueOf(avgBlockSize * 21 / 10).toString()); @@ -178,8 +191,8 @@ } private void addSlab(int blockSize, int numBlocks) { - LOG.info("Creating a slab of blockSize " + blockSize + " with " + numBlocks - + " blocks, " + StringUtils.humanReadableInt(blockSize * (long) numBlocks) + "bytes."); + LOG.info("Creating slab of blockSize " + blockSize + " with " + numBlocks + + " blocks, " + StringUtils.byteDesc(blockSize * (long) numBlocks)); sizer.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this)); } @@ -325,6 +338,7 @@ /* * Statistics thread. Periodically prints the cache statistics to the log. + * TODO: Fix. Just emit to metrics. Don't run a thread just to do a log. 
*/ static class StatisticsThread extends HasThread { SlabCache ourcache; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index 173b1b03f2b..4a3c31f67ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.mapreduce.hadoopbackport.JarFinder; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -280,6 +281,6 @@ public class TableMapReduceUtil { public static void resetCacheConfig(Configuration conf) { conf.setFloat( HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0f); + conf.setFloat(CacheConfig.BUCKET_CACHE_SIZE_KEY, 0f); - conf.setFloat("hbase.offheapcache.percentage", 0f); - conf.setFloat("hbase.bucketcache.size", 0f); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 20110f2d402..ca6f4443dfa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -337,8 +337,7 @@ public interface HLog { * able to sync an explicit edit only (the current default implementation syncs up to the time * of the sync call syncing whatever is behind the sync). 
* @throws IOException - * @deprecated Use - * {@link #appendNoSync(HRegionInfo, HLogKey, WALEdit, HTableDescriptor, AtomicLong, boolean)} + * @deprecated Use {@link #appendNoSync(HTableDescriptor, HRegionInfo, HLogKey, WALEdit, AtomicLong, boolean)} * instead because you can get back the region edit/sequenceid; it is set into the passed in * key. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index b7722234ff8..9e92d5ebf1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -131,7 +131,7 @@ import com.google.protobuf.Service; * *

* To perform authorization checks, {@code AccessController} relies on the - * {@link org.apache.hadoop.hbase.ipc.RpcServerEngine} being loaded to provide + * RpcServerEngine being loaded to provide * the user identities for remote requests. *

* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java index 466a498e8a9..f75a5f6a1cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java @@ -26,6 +26,11 @@ import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.List; +import javax.management.JMException; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -33,11 +38,6 @@ import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Preconditions; -import javax.management.JMException; -import javax.management.MBeanServer; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; - /** * Utilities for interacting with and monitoring DirectByteBuffer allocations. */ @@ -71,7 +71,7 @@ public class DirectMemoryUtils { try { a = BEAN_SERVER.getAttribute(NIO_DIRECT_POOL, MEMORY_USED); } catch (JMException e) { - LOG.debug("Failed to retrieve nio.BufferPool direct MemoryUsed attribute.", e); + LOG.debug("Failed to retrieve nio.BufferPool direct MemoryUsed attribute: " + e); } } HAS_MEMORY_USED_ATTRIBUTE = a != null; diff --git a/pom.xml b/pom.xml index e717cca1d70..52a67ec9731 100644 --- a/pom.xml +++ b/pom.xml @@ -525,7 +525,8 @@ using method parallelization class ! 
--> ${surefire.testFailureIgnore} ${surefire.timeout} - -enableassertions -Xmx1900m -XX:MaxPermSize=100m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true + + -enableassertions -XX:MaxDirectMemorySize=1G -Xmx1900m -XX:MaxPermSize=100m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true ${test.output.tofile} diff --git a/src/main/docbkx/book.xml b/src/main/docbkx/book.xml index 023bd2b3dd1..0fb3eba8fdc 100644 --- a/src/main/docbkx/book.xml +++ b/src/main/docbkx/book.xml @@ -1590,7 +1590,9 @@ rs.close(); Block Cache Below we describe the default block cache implementation, the LRUBlockCache. Read for an understanding of how it works and an overview of the facility it provides. - Other, off-heap options have since been added. After reading the below, + Other, off-heap options have since been added. These are described in the + javadoc org.apache.hadoop.hbase.io.hfile package description. + After reading the below, be sure to visit the blog series BlockCache 101 by Nick Dimiduk where other Block Cache implementations are described. @@ -1670,6 +1672,12 @@ rs.close(); +
Offheap Block Cache + There are a few options for configuring an off-heap cache for blocks read from HDFS. + The options and their setup are described in a javadoc package doc. See + org.apache.hadoop.hbase.io.hfile package description. + +