HBASE-14793 Allow limiting size of block into L1 block cache.

Elliott Clark 2015-11-10 12:21:32 -08:00
parent f8ee447c55
commit 604c9b2cca
10 changed files with 107 additions and 16 deletions

MetricsRegionServerSource.java

@@ -227,6 +227,9 @@ public interface MetricsRegionServerSource extends BaseSource {
   String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent";
   String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC =
       "The percent of the time that requests with the cache turned on hit the cache.";
+  String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount";
+  String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " +
+      "insertion failed. Usually due to size restrictions.";
   String RS_START_TIME_NAME = "regionServerStartTime";
   String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
   String SERVER_NAME_NAME = "serverName";

MetricsRegionServerWrapper.java

@@ -233,6 +233,11 @@ public interface MetricsRegionServerWrapper {
    */
   double getBlockCacheHitCachingPercent();

+  /**
+   * Number of cache insertions that failed.
+   */
+  long getBlockCacheFailedInsertions();
+
   /**
    * Force a re-computation of the metrics.
    */

MetricsRegionServerSourceImpl.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 public class MetricsRegionServerSourceImpl
     extends BaseSourceImpl implements MetricsRegionServerSource {

   final MetricsRegionServerWrapper rsWrap;
   private final MetricHistogram putHisto;
   private final MetricHistogram deleteHisto;

@@ -250,6 +251,8 @@ public class MetricsRegionServerSourceImpl
           rsWrap.getBlockCacheHitPercent())
       .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
           BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent())
+      .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT,
+          BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), rsWrap.getBlockCacheFailedInsertions())
       .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC),
           rsWrap.getUpdatesBlockedTime())
       .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC),
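For reference, the wiring above follows the stock Hadoop metrics2 pattern: a counter is registered with an interned name/description pair, and its value is a snapshot pulled from the wrapper on each collection cycle. A minimal sketch of that pattern, assuming hadoop-common on the classpath; the helper class and method names here are illustrative, not part of this patch.

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical helper mirroring the addCounter() call added above.
final class FailedInsertMetricSketch {
  static void addTo(MetricsRecordBuilder mrb, long failedInserts) {
    // addCounter takes a MetricsInfo (name + description) and the current
    // value; the value is read from the wrapper at collection time.
    mrb.addCounter(
        Interns.info("blockCacheFailedInsertionCount",
            "Number of times that a block cache insertion failed."),
        failedInserts);
  }
}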

CacheStats.java

@@ -74,6 +74,9 @@ public class CacheStats {
   /** The total number of blocks for primary replica that have been evicted */
   private final AtomicLong primaryEvictedBlockCount = new AtomicLong(0);

+  /** The total number of blocks that were not inserted. */
+  private final AtomicLong failedInserts = new AtomicLong(0);
+
   /** The number of metrics periods to include in window */
   private final int numPeriodsInWindow;
   /** Hit counts for each period in window */

@@ -154,6 +157,10 @@
     }
   }

+  public long failInsert() {
+    return failedInserts.incrementAndGet();
+  }
+
   public long getRequestCount() {
     return getHitCount() + getMissCount();
   }

@@ -218,6 +225,10 @@
     return ((float)getEvictedCount()/(float)getEvictionCount());
   }

+  public long getFailedInserts() {
+    return failedInserts.get();
+  }
+
   public void rollMetricsPeriod() {
     hitCounts[windowIndex] = getHitCount() - lastHitCount;
     lastHitCount = getHitCount();
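The new accounting is a single lock-free counter: failInsert() bumps it and returns the running total (LruBlockCache uses that return value below to throttle its warning log), while getFailedInserts() exposes a snapshot to the metrics layer. A self-contained sketch of the same pattern:

import java.util.concurrent.atomic.AtomicLong;

// Standalone model of the counter added to CacheStats above.
class FailedInsertCounter {
  private final AtomicLong failedInserts = new AtomicLong(0);

  /** Record one failed insertion; returns the running total. */
  long failInsert() {
    return failedInserts.incrementAndGet();
  }

  /** Snapshot for the metrics layer. */
  long getFailedInserts() {
    return failedInserts.get();
  }
}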

LruBlockCache.java

@@ -142,12 +142,15 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   /** Statistics thread */
   static final int statThreadPeriod = 60 * 5;

+  private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size";
+  private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L;
+
   /** Concurrent map (the cache) */
   private final Map<BlockCacheKey,LruCachedBlock> map;

   /** Eviction lock (locked when eviction in process) */
   private final ReentrantLock evictionLock = new ReentrantLock(true);

+  private final long maxBlockSize;
+
   /** Volatile boolean to track if we are in an eviction process or not */
   private volatile boolean evictionInProgress = false;

@@ -225,7 +228,8 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
       DEFAULT_SINGLE_FACTOR,
       DEFAULT_MULTI_FACTOR,
       DEFAULT_MEMORY_FACTOR,
-      false
+      false,
+      DEFAULT_MAX_BLOCK_SIZE
       );
   }

@@ -239,7 +243,8 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
       conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR),
       conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
       conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR),
-      conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE)
+      conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE),
+      conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
       );
   }

@@ -264,7 +269,8 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
       int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
       float minFactor, float acceptableFactor, float singleFactor,
-      float multiFactor, float memoryFactor, boolean forceInMemory) {
+      float multiFactor, float memoryFactor, boolean forceInMemory, long maxBlockSize) {
+    this.maxBlockSize = maxBlockSize;
     if(singleFactor + multiFactor + memoryFactor != 1 ||
         singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) {
       throw new IllegalArgumentException("Single, multi, and memory factors " +

@@ -326,6 +332,21 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   @Override
   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
       final boolean cacheDataInL1) {
+
+    if (buf.heapSize() > maxBlockSize) {
+      // If there are a lot of blocks that are too
+      // big this can make the logs way too noisy.
+      // So we log 2%
+      if (stats.failInsert() % 50 == 0) {
+        LOG.warn("Trying to cache too large a block "
+            + cacheKey.getHfileName() + " @ "
+            + cacheKey.getOffset()
+            + " is " + buf.heapSize()
+            + " which is larger than " + maxBlockSize);
+      }
+      return;
+    }
+
     LruCachedBlock cb = map.get(cacheKey);
     if (cb != null) {
       // compare the contents, if they are not equal, we are in big trouble

@@ -883,8 +904,8 @@
   }

   public final static long CACHE_FIXED_OVERHEAD = ClassSize.align(
-      (3 * Bytes.SIZEOF_LONG) + (9 * ClassSize.REFERENCE) +
-      (5 * Bytes.SIZEOF_FLOAT) + Bytes.SIZEOF_BOOLEAN
+      (3 * Bytes.SIZEOF_LONG) + (10 * ClassSize.REFERENCE) +
+      (5 * Bytes.SIZEOF_FLOAT) + (2 * Bytes.SIZEOF_BOOLEAN)
       + ClassSize.OBJECT);

   @Override
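Taken together: any block whose heap size exceeds hbase.lru.max.block.size (16 MB by default) is rejected before it can enter the L1 cache, the failure counter is bumped, and only every 50th rejection (roughly 2%) is logged, so a workload full of oversized blocks cannot flood the logs. A minimal standalone sketch of that admission check, with hypothetical names; it is not the HBase implementation:

import java.util.concurrent.atomic.AtomicLong;

// Illustrative model of the size check added to LruBlockCache.cacheBlock().
class SizeLimitedCacheSketch {
  private final long maxBlockSize; // patch default: 16L * 1024L * 1024L
  private final AtomicLong failedInserts = new AtomicLong(0);

  SizeLimitedCacheSketch(long maxBlockSize) {
    this.maxBlockSize = maxBlockSize;
  }

  /** Returns false when the block is too large to be admitted. */
  boolean admit(String key, long heapSize) {
    if (heapSize > maxBlockSize) {
      // Log only every 50th rejection (~2%) to keep the logs quiet.
      if (failedInserts.incrementAndGet() % 50 == 0) {
        System.err.println("Trying to cache too large a block " + key
            + " is " + heapSize + " which is larger than " + maxBlockSize);
      }
      return false;
    }
    return true; // the normal insertion path would proceed from here
  }
}

Operators can tune the limit through the new key before the cache is constructed, e.g. conf.setLong("hbase.lru.max.block.size", 32L * 1024L * 1024L) on the Hadoop Configuration handed to the region server.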

BucketCache.java

@@ -150,7 +150,6 @@ public class BucketCache implements BlockCache, HeapSize {
   private final AtomicLong heapSize = new AtomicLong(0);
   /** Current number of cached elements */
   private final AtomicLong blockNumber = new AtomicLong(0);
-  private final AtomicLong failedBlockAdditions = new AtomicLong(0);

   /** Cache access count (sequential ID) */
   private final AtomicLong accessCount = new AtomicLong(0);

@@ -374,7 +373,7 @@
     }
     if (!successfulAddition) {
       ramCache.remove(cacheKey);
-      failedBlockAdditions.incrementAndGet();
+      cacheStats.failInsert();
     } else {
       this.blockNumber.incrementAndGet();
       this.heapSize.addAndGet(cachedItem.heapSize());

@@ -514,7 +513,7 @@
     long usedSize = bucketAllocator.getUsedSize();
     long freeSize = totalSize - usedSize;
     long cacheSize = getRealCacheSize();
-    LOG.info("failedBlockAdditions=" + getFailedBlockAdditions() + ", " +
+    LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " +
         "totalSize=" + StringUtils.byteDesc(totalSize) + ", " +
         "freeSize=" + StringUtils.byteDesc(freeSize) + ", " +
         "usedSize=" + StringUtils.byteDesc(usedSize) + ", " +

@@ -535,10 +534,6 @@
     cacheStats.reset();
   }

-  public long getFailedBlockAdditions() {
-    return this.failedBlockAdditions.get();
-  }
-
   public long getRealCacheSize() {
     return this.realCacheSize.get();
   }

MetricsRegionServerWrapperImpl.java

@@ -310,6 +310,11 @@ class MetricsRegionServerWrapperImpl
     return (ratio * 100);
   }

+  @Override
+  public long getBlockCacheFailedInsertions() {
+    return this.cacheStats.getFailedInserts();
+  }
+
   @Override public void forceRecompute() {
     this.runnable.run();
   }

TestLruBlockCache.java

@@ -19,6 +19,8 @@
 package org.apache.hadoop.hbase.io.hfile;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

 import java.nio.ByteBuffer;

@@ -265,7 +267,8 @@ public class TestLruBlockCache {
         0.33f, // single
         0.33f, // multi
         0.34f, // memory
-        false);
+        false,
+        16 * 1024 * 1024);

     CachedItem [] singleBlocks = generateFixedBlocks(5, blockSize, "single");
     CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");

@@ -385,7 +388,8 @@
         0.2f, // single
         0.3f, // multi
         0.5f, // memory
-        true);
+        true,
+        16 * 1024 * 1024);

     CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single");
     CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi");

@@ -490,7 +494,8 @@
         0.33f, // single
         0.33f, // multi
         0.34f, // memory
-        false);
+        false,
+        16 * 1024 * 1024);

     CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single");
     CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");

@@ -538,6 +543,43 @@
   }

+  @Test
+  public void testMaxBlockSize() throws Exception {
+    long maxSize = 100000;
+    long blockSize = calculateBlockSize(maxSize, 10);
+    LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
+        (int)Math.ceil(1.2*maxSize/blockSize),
+        LruBlockCache.DEFAULT_LOAD_FACTOR,
+        LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
+        0.66f, // min
+        0.99f, // acceptable
+        0.33f, // single
+        0.33f, // multi
+        0.34f, // memory
+        false,
+        1024);
+    CachedItem [] tooLong = generateFixedBlocks(10, 1024 + 5, "long");
+    CachedItem [] small = generateFixedBlocks(15, 600, "small");
+
+    for (CachedItem i : tooLong) {
+      cache.cacheBlock(i.cacheKey, i);
+    }
+    for (CachedItem i : small) {
+      cache.cacheBlock(i.cacheKey, i);
+    }
+    assertEquals(15, cache.getBlockCount());
+    for (CachedItem i : small) {
+      assertNotNull(cache.getBlock(i.cacheKey, true, false, false));
+    }
+    for (CachedItem i : tooLong) {
+      assertNull(cache.getBlock(i.cacheKey, true, false, false));
+    }
+    assertEquals(10, cache.getStats().getFailedInserts());
+  }
+
   // test setMaxSize
   @Test
   public void testResizeBlockCache() throws Exception {

@@ -554,7 +596,8 @@
         0.33f, // single
         0.33f, // multi
         0.34f, // memory
-        false);
+        false,
+        16 * 1024 * 1024);

     CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single");
     CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi");
MetricsRegionServerWrapperStub.java

@@ -210,6 +210,10 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrapper
     return 97;
   }

+  @Override
+  public long getBlockCacheFailedInsertions() {
+    return 36;
+  }

   @Override
   public long getUpdatesBlockedTime() {
TestMetricsRegionServer.java

@@ -86,6 +86,7 @@ public class TestMetricsRegionServer {
     HELPER.assertCounter("blockCacheEvictionCount", 418, serverSource);
     HELPER.assertGauge("blockCacheCountHitPercent", 98, serverSource);
     HELPER.assertGauge("blockCacheExpressHitPercent", 97, serverSource);
+    HELPER.assertCounter("blockCacheFailedInsertionCount", 36, serverSource);
     HELPER.assertCounter("updatesBlockedTime", 419, serverSource);
   }