HBASE-16993 BucketCache throw java.io.IOException: Invalid HFile block magic when configuring hbase.bucketcache.bucket.sizes.

parent 0c49185c3e
commit bc93b6610b
hbase-common/src/main/resources/hbase-default.xml
@@ -905,7 +905,7 @@ possible configurations would overwhelm and obscure the important.
     <description>A comma-separated list of sizes for buckets for the bucketcache.
     Can be multiple sizes. List block sizes in order from smallest to largest.
     The sizes you use will depend on your data access patterns.
-    Must be a multiple of 1024 else you will run into
+    Must be a multiple of 256 else you will run into
     'java.io.IOException: Invalid HFile block magic' when you go to read from cache.
     If you specify no values here, then you pick up the default bucketsizes set
     in code (See BucketAllocator#DEFAULT_BUCKET_SIZES).
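For reference, a minimal sketch of setting bucket sizes that satisfy the documented constraint from client code. The key string matches the property described above; the size values and class name are illustrative, not prescribed by this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BucketSizesExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Sizes listed smallest to largest; every entry is a multiple of 256,
    // so the validation added in this commit accepts them.
    String sizes = "5120,9216,17408,33792";
    for (String s : sizes.split(",")) {
      if (Integer.parseInt(s.trim()) % 256 != 0) {
        throw new IllegalArgumentException(s + " is not a multiple of 256");
      }
    }
    conf.set("hbase.bucketcache.bucket.sizes", sizes);
  }
}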
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -584,7 +584,8 @@ public class CacheConfig {
    * @return Returns L2 block cache instance (for now it is BucketCache BlockCache all the time)
    * or null if not supposed to be a L2.
    */
-  private static BlockCache getL2(final Configuration c) {
+  @VisibleForTesting
+  static BlockCache getL2(final Configuration c) {
     final boolean useExternal = c.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Trying to use " + (useExternal?" External":" Internal") + " l2 cache");
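The annotation above comes from Guava (com.google.common.annotations.VisibleForTesting) and enforces nothing at runtime; it is documentation. What actually lets the new test call getL2 is the drop from private to package-private, since TestCacheConfig lives in the same package. A minimal sketch of the pattern (Widget and parseSize are made-up names):

import com.google.common.annotations.VisibleForTesting;

public class Widget {
  // Package-private instead of private so a same-package test can call it;
  // the annotation signals that production code should not.
  @VisibleForTesting
  static int parseSize(String s) {
    return Integer.parseInt(s.trim());
  }
}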
@@ -594,10 +595,8 @@ public class CacheConfig {
     if (useExternal) {
       return getExternalBlockcache(c);
     }
-
     // otherwise use the bucket cache.
     return getBucketCache(c);
-
   }
 
   private static BlockCache getExternalBlockcache(Configuration c) {
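After this cleanup the method reads as a straight two-way dispatch: one boolean key selects the external cache path, and everything else falls through to BucketCache. A minimal sketch of driving both paths through configuration (the key strings are, to the best of my reading, the values behind EXTERNAL_BLOCKCACHE_KEY and HConstants.BUCKET_CACHE_IOENGINE_KEY; the surrounding class is ours):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class L2SelectionExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // false (the default) falls through to the BucketCache branch of getL2;
    // true would route to getExternalBlockcache instead.
    conf.setBoolean("hbase.blockcache.use.external", false);
    // The BucketCache branch additionally needs an IO engine, e.g. off-heap memory.
    conf.set("hbase.bucketcache.ioengine", "offheap");
  }
}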
@@ -651,7 +650,17 @@ public class CacheConfig {
     if (configuredBucketSizes != null) {
       bucketSizes = new int[configuredBucketSizes.length];
       for (int i = 0; i < configuredBucketSizes.length; i++) {
-        bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i].trim());
+        int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim());
+        if (bucketSize % 256 != 0) {
+          // We need all the bucket sizes to be multiples of 256. Having all the configured
+          // bucket sizes as multiples of 256 ensures that the block offsets calculated within
+          // buckets are also multiples of 256.
+          // See BucketEntry, where the offset to each block is represented using 5 bytes (instead
+          // of an 8-byte long). We would like to keep the heap overhead as low as possible.
+          throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '"
+              + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes to be multiples of 256");
+        }
+        bucketSizes[i] = bucketSize;
       }
     }
     BucketCache bucketCache = null;
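The comment block above points at BucketEntry's 5-byte offset encoding as the reason for the multiple-of-256 rule. A simplified sketch of that packing (modeled on BucketCache.BucketEntry; the class and field names here are ours) shows why: the low 8 bits of the offset are shifted away on store and come back as zeros on load, so only offsets that are multiples of 256 survive the round trip.

public class FiveByteOffsetSketch {
  // Offsets are stored in 5 bytes: a 4-byte base plus 1 extra byte, after
  // dropping the low 8 bits. This only round-trips losslessly when the
  // offset is a multiple of 256.
  private int offsetBase;
  private byte offset1;

  void setOffset(long value) {
    assert (value & 0xFF) == 0 : "offset must be a multiple of 256";
    value >>= 8;                      // drop the low 8 bits
    offsetBase = (int) value;         // low 32 of the remaining bits
    offset1 = (byte) (value >> 32);   // next 8 bits
  }

  long offset() {
    long o = ((long) offsetBase) & 0xFFFFFFFFL;
    o |= (((long) offset1) & 0xFF) << 32;
    return o << 8;                    // restore the dropped bits as zeros
  }

  public static void main(String[] args) {
    FiveByteOffsetSketch e = new FiveByteOffsetSketch();
    e.setOffset(131072);              // 131072 % 256 == 0, so it round-trips exactly
    System.out.println(e.offset());   // prints 131072
    // An offset such as 131000 (131000 % 256 == 184) would come back as 130816,
    // pointing 184 bytes before the real block, hence the bad HFile block magic.
  }
}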
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -414,6 +415,19 @@ public class TestCacheConfig {
     assertDataBlockCount(lrubc, 1);
   }
 
+  @Test
+  public void testL2CacheWithInvalidBucketSize() {
+    Configuration c = new Configuration(this.conf);
+    c.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
+    c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
+    c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
+    try {
+      CacheConfig.getL2(c);
+      fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
+    } catch (IllegalArgumentException e) {
+    }
+  }
+
   private void assertDataBlockCount(final LruBlockCache bc, final int expected) {
     Map<BlockType, Integer> blocks = bc.getBlockTypeCountsForTest();
     assertEquals(expected, blocks == null? 0:
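Note the bait in the size list: 4000 is the only entry that is not a multiple of 256 (4000 % 256 == 160), so the validation added above is expected to throw IllegalArgumentException before any cache is built, and the empty catch block is the test passing. The direct call to CacheConfig.getL2(c) is only possible because the earlier hunk widened getL2 from private to package-private.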