HBASE-11171 More doc improvements on block cache options

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1595267 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2014-05-16 17:10:12 +00:00
parent 247eadc582
commit b42fd10a32
14 changed files with 180 additions and 69 deletions

View File

@ -66,10 +66,12 @@ export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
# Uncomment below if you intend to use the EXPERIMENTAL off heap cache.
# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize="
# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value.
# Uncomment below if you intend to use off heap cache.
# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=SET_THIS_TO_HOW_MANY_GIGS_OF_OFFHEAP"
# For example, to allocate 8G of offheap, set SET_THIS_TO_HOW_MANY_GIGS_OF_OFFHEAP to 8G as in:
# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize=8G"
# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
# needed for setting up off-heap block caching.
# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.

View File

@ -939,8 +939,10 @@ possible configurations would overwhelm and obscure the important.
<name>hbase.offheapcache.percentage</name>
<value>0</value>
<description>The percentage of the off heap space (-XX:MaxDirectMemorySize) to be
allocated towards the experimental off heap cache. If you desire the cache to be
disabled, simply set this value to 0.</description>
allocated towards the experimental off heap "SlabCache" (This is different from
the BucketCache -- see the package javadoc for org.apache.hadoop.hbase.io.hfile
for more on your options). If you desire the cache to be disabled, simply set this
value to 0.</description>
</property>
<property>
<name>hbase.data.umask.enable</name>

View File

@ -33,6 +33,8 @@ import org.apache.hadoop.hbase.io.hfile.slab.SlabCache;
import org.apache.hadoop.hbase.util.DirectMemoryUtils;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* Stores all of the cache objects and configuration for a single HFile.
*/
@ -83,7 +85,7 @@ public class CacheConfig {
* to the file that will host the file-based cache. See BucketCache#getIOEngineFromName() for
* list of supported ioengine options.
*
* <p>Set this option and a non-zero {@link BUCKET_CACHE_SIZE_KEY} to enable bucket cache.
* <p>Set this option and a non-zero {@link #BUCKET_CACHE_SIZE_KEY} to enable bucket cache.
*/
public static final String BUCKET_CACHE_IOENGINE_KEY = "hbase.bucketcache.ioengine";
@ -91,10 +93,10 @@ public class CacheConfig {
* When using bucket cache, this is a float that EITHER represents a percentage of total heap
* memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache.
*
* <p>The resultant size is further divided if {@link BUCKET_CACHE_COMBINED_KEY} is set (It is
* <p>The resultant size is further divided if {@link #BUCKET_CACHE_COMBINED_KEY} is set (It is
* set by default. When false, bucket cache serves as an "L2" cache to the "L1"
* {@link LruBlockCache}). The percentage is set in
* with {@link BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} float.
* with {@link #BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} float.
*/
public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";
@ -115,7 +117,7 @@ public class CacheConfig {
/**
* A float which designates how much of the overall cache to give to bucket cache
* and how much to on-heap lru cache when {@link BUCKET_CACHE_COMBINED_KEY} is set.
* and how much to on-heap lru cache when {@link #BUCKET_CACHE_COMBINED_KEY} is set.
*/
public static final String BUCKET_CACHE_COMBINED_PERCENTAGE_KEY =
"hbase.bucketcache.percentage.in.combinedcache";
@ -251,6 +253,7 @@ public class CacheConfig {
this.cacheBloomsOnWrite = cacheBloomsOnWrite;
this.evictOnClose = evictOnClose;
this.cacheCompressed = cacheCompressed;
LOG.info(this);
}
/**
@ -369,13 +372,13 @@ public class CacheConfig {
if (!isBlockCacheEnabled()) {
return "CacheConfig:disabled";
}
return "CacheConfig:enabled " +
"[cacheDataOnRead=" + shouldCacheDataOnRead() + "] " +
"[cacheDataOnWrite=" + shouldCacheDataOnWrite() + "] " +
"[cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() + "] " +
"[cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() + "] " +
"[cacheEvictOnClose=" + shouldEvictOnClose() + "] " +
"[cacheCompressed=" + shouldCacheCompressed() + "]";
return "blockCache=" + getBlockCache() +
", cacheDataOnRead=" + shouldCacheDataOnRead() +
", cacheDataOnWrite=" + shouldCacheDataOnWrite() +
", cacheIndexesOnWrite=" + shouldCacheIndexesOnWrite() +
", cacheBloomsOnWrite=" + shouldCacheBloomsOnWrite() +
", cacheEvictOnClose=" + shouldEvictOnClose() +
", cacheCompressed=" + shouldCacheCompressed();
}
// Static block cache reference and methods
@ -384,7 +387,9 @@ public class CacheConfig {
* Static reference to the block cache, or null if no caching should be used
* at all.
*/
private static BlockCache globalBlockCache;
// Clear this if in tests you'd make more than one block cache instance.
@VisibleForTesting
static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
/** Boolean whether we have disabled the block cache entirely. */
private static boolean blockCacheDisabled = false;
@ -396,7 +401,7 @@ public class CacheConfig {
* @return The block cache or <code>null</code>.
*/
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
if (globalBlockCache != null) return globalBlockCache;
if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
if (blockCacheDisabled) return null;
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
@ -414,10 +419,10 @@ public class CacheConfig {
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
long lruCacheSize = (long) (mu.getMax() * cachePercentage);
int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
long offHeapCacheSize =
(long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0) *
long slabCacheOffHeapCacheSize =
(long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0) *
DirectMemoryUtils.getDirectMemorySize());
if (offHeapCacheSize <= 0) {
if (slabCacheOffHeapCacheSize <= 0) {
String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
// A percentage of max heap size or an absolute value with unit megabytes
@ -452,19 +457,19 @@ public class CacheConfig {
throw new RuntimeException(ioex);
}
}
LOG.info("Allocating LruBlockCache with maximum size " +
StringUtils.humanReadableInt(lruCacheSize) + ", blockSize=" + blockSize);
LOG.info("Allocating LruBlockCache size=" +
StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
LruBlockCache lruCache = new LruBlockCache(lruCacheSize, blockSize);
lruCache.setVictimCache(bucketCache);
if (bucketCache != null && combinedWithLru) {
globalBlockCache = new CombinedBlockCache(lruCache, bucketCache);
GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(lruCache, bucketCache);
} else {
globalBlockCache = lruCache;
GLOBAL_BLOCK_CACHE_INSTANCE = lruCache;
}
} else {
globalBlockCache = new DoubleBlockCache(
lruCacheSize, offHeapCacheSize, blockSize, blockSize, conf);
GLOBAL_BLOCK_CACHE_INSTANCE = new DoubleBlockCache(
lruCacheSize, slabCacheOffHeapCacheSize, blockSize, blockSize, conf);
}
return globalBlockCache;
return GLOBAL_BLOCK_CACHE_INSTANCE;
}
}

View File

@ -31,8 +31,9 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
* CombinedBlockCache is an abstraction layer that combines
* {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used
* to cache bloom blocks and index blocks. The larger bucketCache is used to
* cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean) reads
* first from the smaller lruCache before looking for the block in the bucketCache.
* cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean)} reads
* first from the smaller lruCache before looking for the block in the bucketCache. Blocks evicted
* from lruCache are put into the bucket cache.
* Metrics are the combined size and hits and misses of both caches.
*
*/

View File

@ -63,15 +63,15 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
long onHeapBlockSize, long offHeapBlockSize, Configuration conf) {
LOG.info("Creating on-heap cache of size "
+ StringUtils.humanReadableInt(onHeapSize)
+ "bytes with an average block size of "
+ StringUtils.humanReadableInt(onHeapBlockSize) + " bytes.");
+ StringUtils.byteDesc(onHeapSize)
+ " with an average block size of "
+ StringUtils.byteDesc(onHeapBlockSize));
onHeapCache = new LruBlockCache(onHeapSize, onHeapBlockSize, conf);
LOG.info("Creating off-heap cache of size "
+ StringUtils.humanReadableInt(offHeapSize)
+ "bytes with an average block size of "
+ StringUtils.humanReadableInt(offHeapBlockSize) + " bytes.");
+ StringUtils.byteDesc(offHeapSize)
+ "with an average block size of "
+ StringUtils.byteDesc(offHeapBlockSize));
offHeapCache = new SlabCache(offHeapSize, offHeapBlockSize);
offHeapCache.addSlabByConf(conf);

View File

@ -16,28 +16,104 @@
* limitations under the License.
*/
/**
* Provides implementations of {@link HFile} and HFile
* Provides implementations of {@link org.apache.hadoop.hbase.io.hfile.HFile} and HFile
* {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. Caches are configured (and instantiated)
* by {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}. See head of the
* {@link org.apache.hadoop.hbase.io.hfile.CacheConfig} class for constants that define
* cache options and configuration keys to use setting cache options. Cache implementations
* include the on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache},
* include the default, native on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache},
* a {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} that can serve as an L2 for
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}, and a
* {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} that has a bunch of deploy types
* including L2 for LRUBlockCache or using
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} (hosted inside the class
* {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache} that caches blocks in BOTH L1 and L2,
* and on evict, moves from L1 to L2, etc), and a
* {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} that has a bunch of deploy formats
* including acting as a L2 for LruBlockCache -- when a block is evicted from LruBlockCache, it
* goes to the BucketCache and when we search a block, we look in both places -- or using
* {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}, as
* host for data blocks with meta blocks in the LRUBlockCache as well as onheap, offheap, and
* file options).
* a host for data blocks with meta blocks in the LRUBlockCache as well as onheap, offheap, and
* file options.
*
* <h1>Which BlockCache should I use?</h1>
* BucketCache has seen more production deploys and has more deploy options. Fetching will always
* be slower when fetching from BucketCache but latencies tend to be less erratic over time
* (roughly because GC is less). SlabCache tends to do more GCs as blocks are moved between L1
* and L2 always, at least given the way {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache}
* currently works. It is tough doing an apples to apples compare since their hosting classes,
* {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache} for BucketCache vs
* {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache} operate so differently.
* See Nick Dimiduk's
* <a href="http://www.n10k.com/blog/blockcache-101/">BlockCache 101</a> for some numbers. See
* also the description of <a href="https://issues.apache.org/jira/browse/HBASE-7404">HBASE-7404</a>
* where Chunhui Shen lists issues he found with BlockCache (inefficient use of memory, doesn't
* help w/ GC).
*
* <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}</h1>
* {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} has seen little use and will likely
* be deprecated in the near future. To enable it,
* set the float <code>hbase.offheapcache.percentage</code> to some value between 0 and 1. This
* {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} is the original offheap block cache
* but unfortunately has seen little use. It is originally described in
* <a href="http://blog.cloudera.com/blog/2012/01/caching-in-hbase-slabcache/">Caching
* in Apache HBase: SlabCache</a>. To enable it,
* set the float <code>hbase.offheapcache.percentage</code>
* ({@link CacheConfig#SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY}) to some value between 0 and 1 in
* your <code>hbase-site.xml</code> file. This
* enables {@link org.apache.hadoop.hbase.io.hfile.DoubleBlockCache}, a facade over
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} and
* {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}. The value set here will be
* multiplied by whatever the setting for <code>-XX:MaxDirectMemorySize</code> is and this is what
* {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache}. DoubleBlockCache works as follows.
* When caching, it
* "...attempts to cache the block in both caches, while readblock reads first from the faster
* onheap cache before looking for the block in the off heap cache. Metrics are the
* combined size and hits and misses of both caches." The value set in
* <code>hbase.offheapcache.percentage</code> will be
* multiplied by whatever the setting for <code>-XX:MaxDirectMemorySize</code> is in
* your <code>hbase-env.sh</code> configuration file and this is what
* will be used by {@link org.apache.hadoop.hbase.io.hfile.slab.SlabCache} as its offheap store.
* Onheap store will be whatever the float {@link HConstants#HFILE_BLOCK_CACHE_SIZE_KEY} setting is
* (some value between 0 and 1) times the size of the allocated java heap.
*
* <p>Restart (or rolling restart) your cluster for the configs to take effect. Check logs to
* ensure your configurations came out as expected.
*
* <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}</h1>
* Ensure the SlabCache config <code>hbase.offheapcache.percentage</code> is not set (or set to 0).
* At this point, it is probably best to read the code to learn the list of bucket cache options
* and how they combine (to be fixed). Read the options and defaults for BucketCache in the
* head of the {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}.
*
* <p>Here is a simple example of how to enable a <code>4G</code>
* offheap bucket cache with 1G onheap cache.
* The onheap/offheap caches
* are managed by {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache} by default. For the
* CombinedBlockCache (from the class comment), "The smaller lruCache is used
* to cache bloom blocks and index blocks, the larger bucketCache is used to
* cache data blocks. getBlock reads first from the smaller lruCache before
* looking for the block in the bucketCache. Metrics are the combined size and
* hits and misses of both caches." To disable CombinedBlockCache and have the BucketCache act
* as a strict L2 cache to the L1 LruBlockCache (i.e. on eviction from L1, blocks go to L2), set
* {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_COMBINED_KEY} to false.
* Also by default, unless you change it,
* {@link CacheConfig#BUCKET_CACHE_COMBINED_PERCENTAGE_KEY} defaults to <code>0.9</code> (see
* the top of the CacheConfig in the BucketCache defaults section). This means that whatever
* size you set for the bucket cache with
* {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#BUCKET_CACHE_SIZE_KEY},
* <code>90%</code> will be used for offheap and <code>10%</code> of the size will be used
* by the onheap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}.
* <p>Back to the example of setting an onheap cache of 1G and offheap of 4G, in
* <code>hbase-env.sh</code> ensure the java option <code>-XX:MaxDirectMemorySize</code> is
* enabled and 5G in size: e.g. <code>-XX:MaxDirectMemorySize=5G</code>. Then in
* <code>hbase-site.xml</code> add the following configurations:
<pre>&lt;property>
&lt;name>hbase.bucketcache.ioengine&lt;/name>
&lt;value>offheap&lt;/value>
&lt;/property>
&lt;property>
&lt;name>hbase.bucketcache.percentage.in.combinedcache&lt;/name>
&lt;value>0.8&lt;/value>
&lt;/property>
&lt;property>
&lt;name>hbase.bucketcache.size&lt;/name>
&lt;value>5120&lt;/value>
&lt;/property></pre>. Above we set a cache of 5G, 80% of which will be offheap (4G) and 1G onheap.
* Restart (or rolling restart) your cluster for the configs to take effect. Check logs to ensure
* your configurations came out as expected.
*
*/
package org.apache.hadoop.hbase.io.hfile;

View File

@ -51,10 +51,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* order to determine where a given element fits. Redirects gets and puts to the
* correct SingleSizeCache.
*
* <p>It is configured with a call to {@link #addSlab(int, int)}
*
**/
@InterfaceAudience.Private
public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
private final ConcurrentHashMap<BlockCacheKey, SingleSizeCache> backingStore;
private final TreeMap<Integer, SingleSizeCache> sizer;
static final Log LOG = LogFactory.getLog(SlabCache.class);
@ -71,13 +72,26 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
private static final long CACHE_FIXED_OVERHEAD = ClassSize.estimateBase(
SlabCache.class, false);
/**
* Key used when reading from configuration the list of percentages of our total space we
* allocate to the slabs. Defaults: "0.80", "0.20".
* @see #SLAB_CACHE_SIZES_KEY Must have corresponding number of elements.
*/
static final String SLAB_CACHE_PROPORTIONS_KEY = "hbase.offheapcache.slab.proportions";
/**
* Configuration key for list of the blocksize of the slabs in bytes. (E.g. the slab holds
* blocks of this size). Defaults: avgBlockSize * 11 / 10, avgBlockSize * 21 / 10
* @see #SLAB_CACHE_PROPORTIONS_KEY
*/
static final String SLAB_CACHE_SIZES_KEY = "hbase.offheapcache.slab.sizes";
/**
* Default constructor, creates an empty SlabCache.
*
* @param size Total size allocated to the SlabCache. (Bytes)
* @param avgBlockSize Average size of a block being cached.
**/
public SlabCache(long size, long avgBlockSize) {
this.avgBlockSize = avgBlockSize;
this.size = size;
@ -108,9 +122,8 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
*/
public void addSlabByConf(Configuration conf) {
// Proportions we allocate to each slab of the total size.
String[] porportions = conf.getStrings(
"hbase.offheapcache.slab.proportions", "0.80", "0.20");
String[] sizes = conf.getStrings("hbase.offheapcache.slab.sizes",
String[] porportions = conf.getStrings(SLAB_CACHE_PROPORTIONS_KEY, "0.80", "0.20");
String[] sizes = conf.getStrings(SLAB_CACHE_SIZES_KEY,
Long.valueOf(avgBlockSize * 11 / 10).toString(),
Long.valueOf(avgBlockSize * 21 / 10).toString());
@ -178,8 +191,8 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
}
private void addSlab(int blockSize, int numBlocks) {
LOG.info("Creating a slab of blockSize " + blockSize + " with " + numBlocks
+ " blocks, " + StringUtils.humanReadableInt(blockSize * (long) numBlocks) + "bytes.");
LOG.info("Creating slab of blockSize " + blockSize + " with " + numBlocks
+ " blocks, " + StringUtils.byteDesc(blockSize * (long) numBlocks) + "bytes.");
sizer.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
}
@ -325,6 +338,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
/*
* Statistics thread. Periodically prints the cache statistics to the log.
* TODO: Fix. Just emit to metrics. Don't run a thread just to do a log.
*/
static class StatisticsThread extends HasThread {
SlabCache ourcache;

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mapreduce.hadoopbackport.JarFinder;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
@ -280,6 +281,8 @@ public class TableMapReduceUtil {
public static void resetCacheConfig(Configuration conf) {
conf.setFloat(
HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0f);
conf.setFloat(CacheConfig.BUCKET_CACHE_SIZE_KEY, 0f);
conf.setFloat("hbase.offheapcache.percentage", 0f);
conf.setFloat("hbase.bucketcache.size", 0f);
}

View File

@ -337,8 +337,7 @@ public interface HLog {
* able to sync an explicit edit only (the current default implementation syncs up to the time
* of the sync call syncing whatever is behind the sync).
* @throws IOException
* @deprecated Use
* {@link #appendNoSync(HRegionInfo, HLogKey, WALEdit, HTableDescriptor, AtomicLong, boolean)}
* @deprecated Use {@link #appendNoSync(HTableDescriptor, HRegionInfo, HLogKey, WALEdit, AtomicLong, boolean)}
* instead because you can get back the region edit/sequenceid; it is set into the passed in
* <code>key</code>.
*/

View File

@ -131,7 +131,7 @@ import com.google.protobuf.Service;
*
* <p>
* To perform authorization checks, {@code AccessController} relies on the
* {@link org.apache.hadoop.hbase.ipc.RpcServerEngine} being loaded to provide
* RpcServerEngine being loaded to provide
* the user identities for remote requests.
* </p>
*

View File

@ -26,6 +26,11 @@ import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.List;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@ -33,11 +38,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
/**
* Utilities for interacting with and monitoring DirectByteBuffer allocations.
*/
@ -71,7 +71,7 @@ public class DirectMemoryUtils {
try {
a = BEAN_SERVER.getAttribute(NIO_DIRECT_POOL, MEMORY_USED);
} catch (JMException e) {
LOG.debug("Failed to retrieve nio.BufferPool direct MemoryUsed attribute.", e);
LOG.debug("Failed to retrieve nio.BufferPool direct MemoryUsed attribute: " + e);
}
}
HAS_MEMORY_USED_ATTRIBUTE = a != null;

View File

@ -525,7 +525,8 @@
using method parallelization class ! -->
<testFailureIgnore>${surefire.testFailureIgnore}</testFailureIgnore>
<forkedProcessTimeoutInSeconds>${surefire.timeout}</forkedProcessTimeoutInSeconds>
<argLine>-enableassertions -Xmx1900m -XX:MaxPermSize=100m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true</argLine>
<!--Allocate some direct memory for direct memory tests-->
<argLine>-enableassertions -XX:MaxDirectMemorySize=1G -Xmx1900m -XX:MaxPermSize=100m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true</argLine>
<redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
</configuration>
<executions>

View File

@ -1590,7 +1590,9 @@ rs.close();
<title>Block Cache</title>
<para>Below we describe the default block cache implementation, the LRUBlockCache.
Read for an understanding of how it works and an overview of the facility it provides.
Other, off-heap options have since been added. After reading the below,
Other, off-heap options have since been added. These are described in the
javadoc <link xlink:href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/package-summary.html#package_description">org.apache.hadoop.hbase.io.hfile package description</link>.
After reading the below,
be sure to visit the blog series <link xlink:href="http://www.n10k.com/blog/blockcache-101/">BlockCache 101</link> by Nick Dimiduk
where other Block Cache implementations are described.
</para>
@ -1670,6 +1672,12 @@ rs.close();
</listitem>
</itemizedlist>
</section>
<section xml:id="offheap.blockcache"><title>Offheap Block Cache</title>
<para>There are a few options for configuring an off-heap cache for blocks read from HDFS.
The options and their setup are described in a javadoc package doc. See
<link xlink:href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/package-summary.html#package_description">org.apache.hadoop.hbase.io.hfile package description</link>.
</para>
</section>
</section>
<section xml:id="wal">