HBASE-21498 Master OOM when SplitTableRegionProcedure new CacheConfig and instantiate a new BlockCache

Guanghao Zhang 2018-11-19 22:10:43 +08:00
parent 8339e44361
commit 27a0f205c5
19 changed files with 94 additions and 108 deletions
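What changed, in short: the public CacheConfig constructors used to call CacheConfig.instantiateBlockCache(conf) themselves, so code such as SplitTableRegionProcedure could trigger allocation of a BlockCache on a master that was never provisioned for one, and the master OOMed. After this commit the constructors only read the already-created GLOBAL_BLOCK_CACHE_INSTANCE; the cache is instantiated exactly once, by HRegionServer at startup (or explicitly by tests). A minimal sketch of the resulting call pattern, using only the public API visible in the hunks below (the class name CacheConfigUsageSketch is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheConfigUsageSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Done once per process (HRegionServer startup does this now); repeated
    // calls just return the memoized GLOBAL_BLOCK_CACHE_INSTANCE.
    CacheConfig.instantiateBlockCache(conf);
    // Constructing CacheConfig is now cheap: it reuses the global cache
    // instead of building a new one as a side effect.
    CacheConfig a = new CacheConfig(conf);
    CacheConfig b = new CacheConfig(conf);
    assert a.getBlockCache() == b.getBlockCache();
  }
}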

CacheConfig.java

@@ -215,7 +215,7 @@ public class CacheConfig {
    * @param family column family configuration
    */
   public CacheConfig(Configuration conf, ColumnFamilyDescriptor family) {
-    this(CacheConfig.instantiateBlockCache(conf),
+    this(GLOBAL_BLOCK_CACHE_INSTANCE,
         conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ)
             && family.isBlockCacheEnabled(),
         family.isInMemory(),
@@ -245,14 +245,10 @@ public class CacheConfig {
    * @param conf hbase configuration
    */
   public CacheConfig(Configuration conf) {
-    this(conf, true);
-  }
-
-  public CacheConfig(Configuration conf, boolean enableBlockCache) {
-    this(conf, enableBlockCache,
+    this(GLOBAL_BLOCK_CACHE_INSTANCE,
         conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ),
         DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
-                           // strictly from conf
+        // strictly from conf
         conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
         conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE),
         conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE),
@@ -263,28 +259,6 @@ public class CacheConfig {
     LOG.info("Created cacheConfig: " + this);
   }
 
-  private CacheConfig(Configuration conf, boolean enableBlockCache,
-      final boolean cacheDataOnRead, final boolean inMemory,
-      final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
-      final boolean cacheBloomsOnWrite, final boolean evictOnClose,
-      final boolean cacheDataCompressed, final boolean prefetchOnOpen,
-      final boolean dropBehindCompaction) {
-    if (enableBlockCache) {
-      this.blockCache = CacheConfig.instantiateBlockCache(conf);
-    } else {
-      this.blockCache = null;
-    }
-    this.cacheDataOnRead = cacheDataOnRead;
-    this.inMemory = inMemory;
-    this.cacheDataOnWrite = cacheDataOnWrite;
-    this.cacheIndexesOnWrite = cacheIndexesOnWrite;
-    this.cacheBloomsOnWrite = cacheBloomsOnWrite;
-    this.evictOnClose = evictOnClose;
-    this.cacheDataCompressed = cacheDataCompressed;
-    this.prefetchOnOpen = prefetchOnOpen;
-    this.dropBehindCompaction = dropBehindCompaction;
-  }
-
   /**
    * Create a block cache configuration with the specified cache and configuration parameters.
    * @param blockCache reference to block cache, null if completely disabled
@@ -669,12 +643,18 @@ public class CacheConfig {
    * @return The block cache or <code>null</code>.
    */
   public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
-    if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
-    if (blockCacheDisabled) return null;
+    if (GLOBAL_BLOCK_CACHE_INSTANCE != null) {
+      return GLOBAL_BLOCK_CACHE_INSTANCE;
+    }
+    if (blockCacheDisabled) {
+      return null;
+    }
     LruBlockCache onHeapCache = getOnHeapCacheInternal(conf);
     // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the
     // call.
-    if (blockCacheDisabled) return null;
+    if (blockCacheDisabled) {
+      return null;
+    }
     boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
     if (useExternal) {
       L2_CACHE_INSTANCE = getExternalBlockcache(conf);
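The hunk above is the heart of the fix: instantiateBlockCache is now the only place a BlockCache is ever constructed, and it memoizes the result in GLOBAL_BLOCK_CACHE_INSTANCE. A standalone sketch of the same synchronized lazy-singleton shape (class and member names here are illustrative, not HBase's):

// Illustrative sketch (not HBase code): the synchronized lazy-singleton
// shape used by CacheConfig.instantiateBlockCache above.
final class GlobalCacheHolder {
  static final class Cache {}            // stand-in for BlockCache

  private static Cache instance;         // process-wide instance, built at most once
  private static boolean disabled;       // sticky opt-out, like blockCacheDisabled

  // synchronized so racing callers cannot each allocate their own cache
  static synchronized Cache getOrCreate() {
    if (instance != null) {
      return instance;                   // already built: reuse, never re-allocate
    }
    if (disabled) {
      return null;                       // a prior attempt decided "no cache"
    }
    instance = new Cache();              // the expensive allocation, done once
    return instance;
  }

  private GlobalCacheHolder() {}
}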

MobCacheConfig.java

@@ -42,11 +42,6 @@ public class MobCacheConfig extends CacheConfig {
     instantiateMobFileCache(conf);
   }
 
-  public MobCacheConfig(Configuration conf, boolean needBlockCache) {
-    super(conf, needBlockCache);
-    instantiateMobFileCache(conf);
-  }
-
   /**
    * Instantiates the MobFileCache.
    * @param conf The current configuration.

HRegionServer.java

@@ -585,12 +585,17 @@ public class HRegionServer extends HasThread implements
     // init superusers and add the server principal (if using security)
     // or process owner as default super user.
     Superusers.initialize(conf);
     regionServerAccounting = new RegionServerAccounting(conf);
     boolean isMasterNotCarryTable =
         this instanceof HMaster && !LoadBalancer.isTablesOnMaster(conf);
-    cacheConfig = new CacheConfig(conf, !isMasterNotCarryTable);
-    mobCacheConfig = new MobCacheConfig(conf, !isMasterNotCarryTable);
+    // no need to instantiate global block cache when master not carry table
+    if (!isMasterNotCarryTable) {
+      CacheConfig.instantiateBlockCache(conf);
+    }
+    cacheConfig = new CacheConfig(conf);
+    mobCacheConfig = new MobCacheConfig(conf);
     uncaughtExceptionHandler = new UncaughtExceptionHandler() {
       @Override
       public void uncaughtException(Thread t, Throwable e) {
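One consequence worth spelling out, assuming nothing else in the process calls instantiateBlockCache first: on a master that does not carry tables, the global cache is never created, so every CacheConfig built there sees a null block cache instead of allocating one. A hedged sketch (MasterNoCacheSketch is a made-up name; the API calls are the ones in the hunks):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class MasterNoCacheSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Nobody calls CacheConfig.instantiateBlockCache(conf) in this process,
    // mirroring a master that does not carry tables.
    CacheConfig cc = new CacheConfig(conf);
    // The constructor no longer allocates a BlockCache as a side effect.
    assert cc.getBlockCache() == null;
  }
}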

TestEncodedSeekers.java

@@ -112,6 +112,7 @@ public class TestEncodedSeekers {
     if(includeTags) {
       testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
     }
+    CacheConfig.instantiateBlockCache(testUtil.getConfiguration());
     LruBlockCache cache =
         (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
     cache.clearCache();
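The same one-line setup change repeats through the remaining test hunks: since the constructors no longer build a cache on demand, any test that expects getBlockCache() to return a non-null cache must instantiate the global instance first. A minimal sketch of the new contract (the class name BlockCacheSetupSketch is made up; the calls are the ones shown in the hunks):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BlockCacheSetupSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Without this call, new CacheConfig(conf).getBlockCache() now returns null.
    CacheConfig.instantiateBlockCache(conf);
    BlockCache cache = new CacheConfig(conf).getBlockCache();
    assert cache != null;
  }
}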

TestBlockCacheReporting.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.io.hfile;
 import static org.junit.Assert.*;
 
-import com.fasterxml.jackson.core.JsonGenerationException;
-import com.fasterxml.jackson.databind.JsonMappingException;
 import java.io.IOException;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -84,9 +82,10 @@ public class TestBlockCacheReporting {
   }
 
   @Test
-  public void testBucketCache() throws JsonGenerationException, JsonMappingException, IOException {
+  public void testBucketCache() throws IOException {
     this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
     logPerBlock(cc.getBlockCache());
@@ -102,7 +101,8 @@ public class TestBlockCacheReporting {
   }
 
   @Test
-  public void testLruBlockCache() throws JsonGenerationException, JsonMappingException, IOException {
+  public void testLruBlockCache() throws IOException {
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.isBlockCacheEnabled());
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
@@ -131,8 +131,7 @@ public class TestBlockCacheReporting {
     }
   }
 
-  private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf)
-      throws JsonGenerationException, JsonMappingException, IOException {
+  private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IOException {
     for (Map.Entry<String, NavigableSet<CachedBlock>> e:
         cbsbf.getCachedBlockStatsByFile().entrySet()) {
       int count = 0;
@@ -154,10 +153,9 @@ public class TestBlockCacheReporting {
     }
   }
 
-  private BlockCacheUtil.CachedBlocksByFile logPerBlock(final BlockCache bc)
-      throws JsonGenerationException, JsonMappingException, IOException {
+  private BlockCacheUtil.CachedBlocksByFile logPerBlock(final BlockCache bc) throws IOException {
     BlockCacheUtil.CachedBlocksByFile cbsbf = new BlockCacheUtil.CachedBlocksByFile();
-    for (CachedBlock cb: bc) {
+    for (CachedBlock cb : bc) {
       LOG.info(cb.toString());
       LOG.info(BlockCacheUtil.toJSON(bc));
       cbsbf.update(cb);

TestCacheConfig.java

@@ -209,6 +209,7 @@ public class TestCacheConfig {
   @Test
   public void testDisableCacheDataBlock() throws IOException {
     Configuration conf = HBaseConfiguration.create();
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConfig = new CacheConfig(conf);
     assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
     assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
@@ -274,6 +275,7 @@ public class TestCacheConfig {
   @Test
   public void testCacheConfigDefaultLRUBlockCache() {
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.isBlockCacheEnabled());
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
@@ -307,6 +309,7 @@ public class TestCacheConfig {
   private void doBucketCacheConfigTest() {
     final int bcSize = 100;
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     basicBlockCacheOps(cc, false, false);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
@@ -338,6 +341,7 @@ public class TestCacheConfig {
     long bcExpectedSize = 100 * 1024 * 1024; // MB.
     assertTrue(lruExpectedSize < bcExpectedSize);
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     basicBlockCacheOps(cc, false, false);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);

TestCacheOnWrite.java

@@ -160,6 +160,7 @@ public class TestCacheOnWrite {
     Configuration conf = TEST_UTIL.getConfiguration();
     List<BlockCache> blockcaches = new ArrayList<>();
     // default
+    CacheConfig.instantiateBlockCache(conf);
     blockcaches.add(new CacheConfig(conf).getBlockCache());
 
     //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287
@@ -228,7 +229,6 @@ public class TestCacheOnWrite {
     conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
     cowType.modifyConf(conf);
     fs = HFileSystem.get(conf);
-    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
     cacheConf =
         new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
             cowType.shouldBeCached(BlockType.LEAF_INDEX),
TestForceCacheImportantBlocks.java

@@ -106,6 +106,7 @@ public class TestForceCacheImportantBlocks {
     // Make sure we make a new one each time.
     CacheConfig.clearGlobalInstances();
     HFile.DATABLOCK_READ_COUNT.reset();
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @Test

TestHFileBlockIndex.java

@@ -524,67 +524,59 @@ public class TestHFileBlockIndex {
    * @throws IOException
    */
   @Test
-  public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
-    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
-        "hfile_for_midkey");
-    int maxChunkSize = 512;
-    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
-    // should open hfile.block.index.cacheonwrite
-    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
-    CacheConfig cacheConf = new CacheConfig(conf);
-    BlockCache blockCache = cacheConf.getBlockCache();
-    // Evict all blocks that were cached-on-write by the previous invocation.
-    blockCache.evictBlocksByHfileName(hfilePath.getName());
-    // Write the HFile
-    {
-      HFileContext meta = new HFileContextBuilder()
-          .withBlockSize(SMALL_BLOCK_SIZE)
-          .withCompression(Algorithm.NONE)
-          .withDataBlockEncoding(DataBlockEncoding.NONE)
-          .build();
-      HFile.Writer writer =
-          HFile.getWriterFactory(conf, cacheConf)
-              .withPath(fs, hfilePath)
-              .withFileContext(meta)
-              .create();
-      Random rand = new Random(19231737);
-      byte[] family = Bytes.toBytes("f");
-      byte[] qualifier = Bytes.toBytes("q");
-      int kvNumberToBeWritten = 16;
-      // the new generated hfile will contain 2 leaf-index blocks and 16 data blocks,
-      // midkey is just on the boundary of the first leaf-index block
-      for (int i = 0; i < kvNumberToBeWritten; ++i) {
-        byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 30);
-        // Key will be interpreted by KeyValue.KEY_COMPARATOR
-        KeyValue kv =
-            new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
-                RandomKeyValueUtil.randomFixedLengthValue(rand, SMALL_BLOCK_SIZE));
-        writer.append(kv);
-      }
-      writer.close();
-    }
-    // close hfile.block.index.cacheonwrite
-    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
-    // Read the HFile
-    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
-    boolean hasArrayIndexOutOfBoundsException = false;
-    try {
-      // get the mid-key.
-      reader.midKey();
-    } catch (ArrayIndexOutOfBoundsException e) {
-      hasArrayIndexOutOfBoundsException = true;
-    } finally {
-      reader.close();
-    }
-    // to check if ArrayIndexOutOfBoundsException occurred
-    assertFalse(hasArrayIndexOutOfBoundsException);
-  }
+  public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
+    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_midkey");
+    int maxChunkSize = 512;
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+    // should open hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
+    CacheConfig.instantiateBlockCache(conf);
+    CacheConfig cacheConf = new CacheConfig(conf);
+    BlockCache blockCache = cacheConf.getBlockCache();
+    // Evict all blocks that were cached-on-write by the previous invocation.
+    blockCache.evictBlocksByHfileName(hfilePath.getName());
+    // Write the HFile
+    HFileContext meta =
+        new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE).withCompression(Algorithm.NONE)
+            .withDataBlockEncoding(DataBlockEncoding.NONE).build();
+    HFile.Writer writer =
+        HFile.getWriterFactory(conf, cacheConf).withPath(fs, hfilePath).withFileContext(meta)
+            .create();
+    Random rand = new Random(19231737);
+    byte[] family = Bytes.toBytes("f");
+    byte[] qualifier = Bytes.toBytes("q");
+    int kvNumberToBeWritten = 16;
+    // the new generated hfile will contain 2 leaf-index blocks and 16 data blocks,
+    // midkey is just on the boundary of the first leaf-index block
+    for (int i = 0; i < kvNumberToBeWritten; ++i) {
+      byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 30);
+      // Key will be interpreted by KeyValue.KEY_COMPARATOR
+      KeyValue kv = new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
+          RandomKeyValueUtil.randomFixedLengthValue(rand, SMALL_BLOCK_SIZE));
+      writer.append(kv);
+    }
+    writer.close();
+    // close hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
+    // Read the HFile
+    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
+    boolean hasArrayIndexOutOfBoundsException = false;
+    try {
+      // get the mid-key.
+      reader.midKey();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      hasArrayIndexOutOfBoundsException = true;
+    } finally {
+      reader.close();
+    }
+    // to check if ArrayIndexOutOfBoundsException occurred
+    assertFalse(hasArrayIndexOutOfBoundsException);
+  }
 
   /**
    * Testing block index through the HFile writer/reader APIs. Allows to test
@@ -597,6 +589,7 @@ public class TestHFileBlockIndex {
   public void testHFileWriterAndReader() throws IOException {
     Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
         "hfile_for_block_index");
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
     BlockCache blockCache = cacheConf.getBlockCache();

TestPrefetch.java

@@ -64,6 +64,7 @@ public class TestPrefetch {
     conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
     fs = HFileSystem.get(conf);
     CacheConfig.blockCacheDisabled = false;
+    CacheConfig.instantiateBlockCache(conf);
     cacheConf = new CacheConfig(conf);
   }

TestScannerFromBucketCache.java

@@ -88,6 +88,7 @@ public class TestScannerFromBucketCache {
       conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
     }
     tableName = TableName.valueOf(name.getMethodName());
+    CacheConfig.instantiateBlockCache(conf);
   }
 
   @After

TestScannerSelectionUsingKeyRange.java

@@ -124,6 +124,7 @@ public class TestScannerSelectionUsingKeyRange {
     Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
     CacheConfig.blockCacheDisabled = false;
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
     LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
     cache.clearCache();

TestScannerSelectionUsingTTL.java

@@ -104,6 +104,7 @@ public class TestScannerSelectionUsingTTL {
   @Test
   public void testScannerSelection() throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
+    CacheConfig.instantiateBlockCache(conf);
     conf.setBoolean("hbase.store.delete.expired.storefile", false);
     HColumnDescriptor hcd =
         new HColumnDescriptor(FAMILY_BYTES)

TestBlocksRead.java

@@ -76,6 +76,7 @@ public class TestBlocksRead {
   public static void setUp() throws Exception {
     // disable compactions in this test.
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10000);
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @AfterClass

TestBlocksScanned.java

@@ -61,8 +61,8 @@ public class TestBlocksScanned extends HBaseTestCase {
   @Before
   public void setUp() throws Exception {
     super.setUp();
-    TEST_UTIL = new HBaseTestingUtility();
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @Test

TestCacheOnWriteInSchema.java

@@ -161,6 +161,7 @@ public class TestCacheOnWriteInSchema {
     conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
     conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
     conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
+    CacheConfig.instantiateBlockCache(conf);
     fs = HFileSystem.get(conf);

TestCompoundBloomFilter.java

@@ -139,6 +139,7 @@ public class TestCompoundBloomFilter {
     fs = FileSystem.get(conf);
+    CacheConfig.instantiateBlockCache(conf);
     cacheConf = new CacheConfig(conf);
     blockCache = cacheConf.getBlockCache();
     assertNotNull(blockCache);

TestHStoreFile.java

@@ -924,7 +924,6 @@ public class TestHStoreFile extends HBaseTestCase {
     scan.setTimeRange(27, 50);
     scan.setColumnFamilyTimeRange(family, 7, 50);
     assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
-
   }
 
   @Test
@@ -935,6 +934,7 @@ public class TestHStoreFile extends HBaseTestCase {
     Path baseDir = new Path(new Path(testDir, "7e0102"),"twoCOWEOC");
     // Grab the block cache and get the initial hit/miss counts
+    CacheConfig.instantiateBlockCache(conf);
     BlockCache bc = new CacheConfig(conf).getBlockCache();
     assertNotNull(bc);
     CacheStats cs = bc.getStats();

TestRecoveredEdits.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -80,6 +81,7 @@ public class TestRecoveredEdits {
   @Test
   public void testReplayWorksThoughLotsOfFlushing() throws
       IOException {
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
     for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
       testReplayWorksWithMemoryCompactionPolicy(policy);
     }