HBASE-11007 BLOCKCACHE in schema descriptor seems not aptly named

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1595938 13f79535-47bb-0310-9956-ffa450edef68
commit 021bdae697 (parent 6c7af961fa)
Author: Michael Stack
Date: 2014-05-19 15:53:06 +00:00

4 changed files with 60 additions and 38 deletions

HColumnDescriptor.java

@@ -77,6 +77,12 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
       "ENCODE_ON_DISK";
   public static final String DATA_BLOCK_ENCODING =
       "DATA_BLOCK_ENCODING";
+  /**
+   * Key for the BLOCKCACHE attribute.
+   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
+   * cache DATA blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot be
+   * disabled.
+   */
   public static final String BLOCKCACHE = "BLOCKCACHE";
   public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
   public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
@@ -792,7 +798,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   }
   /**
-   * @return True if MapFile blocks should be cached.
+   * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
+   * and BLOOM type blocks).
    */
   public boolean isBlockCacheEnabled() {
     String value = getValue(BLOCKCACHE);
@@ -802,7 +809,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   }
   /**
-   * @param blockCacheEnabled True if MapFile blocks should be cached.
+   * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
+   * INDEX and BLOOM blocks; you cannot turn this off).
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
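
For illustration, a minimal client-side sketch of what the flag actually controls. The table and family names are hypothetical; only the two accessors from the hunks above and the stock HTableDescriptor/TableName API of this era are assumed:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BlockCacheFlagExample {
      public static void main(String[] args) {
        // Hypothetical table and family names.
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("myTable"));
        HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("cf"));
        // Despite the BLOCKCACHE name, this only governs whether DATA blocks
        // are cached on read; INDEX and BLOOM blocks are cached regardless.
        family.setBlockCacheEnabled(false);
        table.addFamily(family);
        System.out.println("cache DATA on read: " + family.isBlockCacheEnabled()); // false
      }
    }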

CacheConfig.java

@@ -161,7 +161,9 @@ public class CacheConfig {
   /**
    * Whether blocks should be cached on read (default is on if there is a
-   * cache but this can be turned off on a per-family or per-request basis)
+   * cache but this can be turned off on a per-family or per-request basis).
+   * If off we will STILL cache meta blocks; i.e. INDEX and BLOOM types.
+   * This cannot be disabled.
    */
   private boolean cacheDataOnRead;
@@ -232,7 +234,8 @@
    * Create a block cache configuration with the specified cache and
    * configuration parameters.
    * @param blockCache reference to block cache, null if completely disabled
-   * @param cacheDataOnRead whether data blocks should be cached on read
+   * @param cacheDataOnRead whether DATA blocks should be cached on read (we always cache INDEX
+   * blocks and BLOOM blocks; this cannot be disabled).
    * @param inMemory whether blocks should be flagged as in-memory
    * @param cacheDataOnWrite whether data blocks should be cached on write
    * @param cacheIndexesOnWrite whether index blocks should be cached on write
@@ -283,7 +286,8 @@
   }
   /**
-   * Returns whether the blocks of this HFile should be cached on read or not.
+   * Returns whether the DATA blocks of this HFile should be cached on read or not (we always
+   * cache the meta blocks, the INDEX and BLOOM blocks).
    * @return true if blocks should be cached on read, false if not
    */
   public boolean shouldCacheDataOnRead() {
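
As a decision table, the contract spelled out in these comments looks roughly like the sketch below. This is a simplified stand-in, not the actual CacheConfig internals; it assumes only the BlockType.BlockCategory enum from the hfile package:

    import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;

    public class CacheOnReadPolicySketch {
      // Returns whether a block read off disk should be put in the block cache.
      // DATA blocks honor the per-family/per-request flag; INDEX and BLOOM
      // ("meta") blocks are cached whenever any block cache exists at all.
      static boolean shouldCacheOnRead(BlockCategory category,
          boolean cacheDataOnRead, boolean blockCacheExists) {
        if (!blockCacheExists) {
          return false; // no cache at all, nothing to decide
        }
        switch (category) {
          case INDEX:
          case BLOOM:
            return true; // caching of these cannot be disabled
          default:
            return cacheDataOnRead; // the BLOCKCACHE, i.e. CACHE_DATA_ON_READ, flag
        }
      }
    }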

CacheStats.java

@@ -87,6 +87,12 @@ public class CacheStats {
     this.requestCachingCounts = initializeZeros(numPeriodsInWindow);
   }
+
+  @Override
+  public String toString() {
+    return "hitCount=" + this.hitCount + ", hitCachingCount=" + this.hitCachingCount +
+      ", missCount=" + this.missCount;
+  }
   public void miss(boolean caching) {
     missCount.incrementAndGet();
     if (caching) missCachingCount.incrementAndGet();
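
A small, hypothetical debugging probe built on the new toString() and the two counters the updated test also reads (getHitCount() and getMissCount(), reached via BlockCache.getStats()):

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheStats;

    public class CacheStatsProbe {
      // Prints the new one-line summary plus a derived hit ratio.
      static void dump(BlockCache cache) {
        CacheStats stats = cache.getStats();
        long hits = stats.getHitCount();
        long misses = stats.getMissCount();
        double ratio = (hits + misses) == 0 ? 0.0 : (double) hits / (hits + misses);
        System.out.println(stats + ", hitRatio=" + ratio); // stats uses the toString() above
      }
    }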

TestForceCacheImportantBlocks.java

@@ -16,11 +16,12 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -29,10 +30,10 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -43,11 +44,14 @@ import org.junit.runners.Parameterized.Parameters;
  * Make sure we always cache important block types, such as index blocks, as
  * long as we have a block cache, even though block caching might be disabled
  * for the column family.
+ *
+ * <p>TODO: This test writes a lot of data and only tests the most basic of metrics. Cache stats
+ * need to reveal more about what is being cached whether DATA or INDEX blocks and then we could
+ * do more verification in this test.
  */
 @Category(MediumTests.class)
 @RunWith(Parameterized.class)
 public class TestForceCacheImportantBlocks {
   private final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
   private static final String TABLE = "myTable";
@@ -68,6 +72,8 @@ public class TestForceCacheImportantBlocks {
       Compression.Algorithm.GZ;
   private static final BloomType BLOOM_TYPE = BloomType.ROW;
+  @SuppressWarnings("unused")
+  // Currently unused.
   private final int hfileVersion;
   private final boolean cfCacheEnabled;
@@ -76,7 +82,7 @@
     // HFile versions
     return Arrays.asList(new Object[][] {
         new Object[] { new Integer(2), false },
-        new Object[] { new Integer(2), true }
+        new Object[] { new Integer(3), true }
       });
   }
@@ -84,42 +90,42 @@
       boolean cfCacheEnabled) {
     this.hfileVersion = hfileVersion;
     this.cfCacheEnabled = cfCacheEnabled;
-    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY,
-      hfileVersion);
+    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, hfileVersion);
   }
+  @Before
+  public void setup() {
+    // Make sure we make a new one each time.
+    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
+    HFile.dataBlockReadCnt.set(0);
+  }
   @Test
   public void testCacheBlocks() throws IOException {
     // Set index block size to be the same as normal block size.
-    TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
-      BLOCK_SIZE);
-    HColumnDescriptor hcd =
-      new HColumnDescriptor(Bytes.toBytes(CF))
-        .setMaxVersions(MAX_VERSIONS)
-        .setCompressionType(COMPRESSION_ALGORITHM)
-        .setBloomFilterType(BLOOM_TYPE);
+    TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE);
+    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(CF)).setMaxVersions(MAX_VERSIONS).
+      setCompressionType(COMPRESSION_ALGORITHM).
+      setBloomFilterType(BLOOM_TYPE);
     hcd.setBlocksize(BLOCK_SIZE);
     hcd.setBlockCacheEnabled(cfCacheEnabled);
     HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
     writeTestData(region);
-    for (int i = 0; i < NUM_ROWS; ++i) {
-      Get get = new Get(Bytes.toBytes("row" + i));
-      region.get(get);
-    }
-    List<BlockCategory> importantBlockCategories =
-      new ArrayList<BlockCategory>();
-    importantBlockCategories.add(BlockCategory.BLOOM);
-    if (hfileVersion == 2) {
-      // We only have index blocks for HFile v2.
-      importantBlockCategories.add(BlockCategory.INDEX);
-    }
+    CacheStats stats =
+      region.getStores().get(hcd.getName()).getCacheConfig().getBlockCache().getStats();
+    assertEquals(0, stats.getHitCount());
+    assertEquals(0, HFile.dataBlockReadCnt.get());
+    // Do a single get, take count of caches. If we are NOT caching DATA blocks, the miss
+    // count should go up. Otherwise, all should be cached and the miss count should not rise.
+    region.get(new Get(Bytes.toBytes("row" + 0)));
+    assertTrue(stats.getHitCount() > 0);
+    assertTrue(HFile.dataBlockReadCnt.get() > 0);
+    long missCount = stats.getMissCount();
+    region.get(new Get(Bytes.toBytes("row" + 0)));
+    if (this.cfCacheEnabled) assertEquals(missCount, stats.getMissCount());
+    else assertTrue(stats.getMissCount() > missCount);
   }
   private void writeTestData(HRegion region) throws IOException {
     for (int i = 0; i < NUM_ROWS; ++i) {
       Put put = new Put(Bytes.toBytes("row" + i));
@@ -135,6 +141,4 @@ public class TestForceCacheImportantBlocks {
     }
   }
 }
-}
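
The verification idiom the rewritten test lands on, distilled: warm the cache with one read, snapshot the miss count, read the same row again, and check whether the miss count moved. A minimal helper sketch under the same assumptions as the test above (HRegion.get(Get) and the CacheStats getters; the helper name is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.io.hfile.CacheStats;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SecondReadCheck {
      // True if a repeated read of the same row added no new cache misses,
      // i.e. the row's DATA blocks were served from the block cache.
      static boolean secondReadHitsCache(HRegion region, CacheStats stats, String row)
          throws IOException {
        region.get(new Get(Bytes.toBytes(row))); // first read: may populate the cache
        long missesBefore = stats.getMissCount();
        region.get(new Get(Bytes.toBytes(row))); // second read of the identical row
        return stats.getMissCount() == missesBefore;
      }
    }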