Revert "HBASE-23066 Allow cache on write during compactions when prefetching is (#919)"

TestCacheOnWrite failing all the time.

This reverts commit d561130e82.
Sean Busbey 2019-12-11 10:17:46 -06:00
parent a553b78c1c
commit ec9bd20ec8
3 changed files with 52 additions and 100 deletions
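For context, the change being reverted (HBASE-23066) added an opt-in configuration key, hbase.rs.cachecompactedblocksonwrite, that let compactions cache the data blocks they write. A minimal sketch of how that now-removed option would have been enabled; the key itself appears in the diff below, while the surrounding setup is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Key introduced by HBASE-23066 and removed again by this revert;
    // after this commit, setting it has no effect.
    conf.setBoolean("hbase.rs.cachecompactedblocksonwrite", true);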

CacheConfig.java

@@ -81,12 +81,6 @@ public class CacheConfig {
    */
   public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen";
 
-  /**
-   * Configuration key to cache blocks when a compacted file is written
-   */
-  public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY =
-      "hbase.rs.cachecompactedblocksonwrite";
-
   public static final String DROP_BEHIND_CACHE_COMPACTION_KEY =
       "hbase.hfile.drop.behind.compaction";

@@ -99,7 +93,6 @@ public class CacheConfig {
   public static final boolean DEFAULT_EVICT_ON_CLOSE = false;
   public static final boolean DEFAULT_CACHE_DATA_COMPRESSED = false;
   public static final boolean DEFAULT_PREFETCH_ON_OPEN = false;
-  public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false;
   public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
 
   /**

@@ -131,11 +124,6 @@ public class CacheConfig {
   /** Whether data blocks should be prefetched into the cache */
   private final boolean prefetchOnOpen;
 
-  /**
-   * Whether data blocks should be cached when compacted file is written
-   */
-  private final boolean cacheCompactedDataOnWrite;
-
   private final boolean dropBehindCompaction;
 
   // Local reference to the block cache

@@ -186,8 +174,6 @@ public class CacheConfig {
         (family == null ? false : family.isEvictBlocksOnClose());
     this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) ||
         (family == null ? false : family.isPrefetchBlocksOnOpen());
-    this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY,
-      DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE);
     this.blockCache = blockCache;
     this.byteBuffAllocator = byteBuffAllocator;
     LOG.info("Created cacheConfig: " + this + (family == null ? "" : " for family " + family) +

@@ -207,7 +193,6 @@ public class CacheConfig {
     this.evictOnClose = cacheConf.evictOnClose;
     this.cacheDataCompressed = cacheConf.cacheDataCompressed;
     this.prefetchOnOpen = cacheConf.prefetchOnOpen;
-    this.cacheCompactedDataOnWrite = cacheConf.cacheCompactedDataOnWrite;
     this.dropBehindCompaction = cacheConf.dropBehindCompaction;
     this.blockCache = cacheConf.blockCache;
     this.byteBuffAllocator = cacheConf.byteBuffAllocator;

@@ -222,7 +207,6 @@ public class CacheConfig {
     this.evictOnClose = false;
     this.cacheDataCompressed = false;
     this.prefetchOnOpen = false;
-    this.cacheCompactedDataOnWrite = false;
     this.dropBehindCompaction = false;
     this.blockCache = null;
     this.byteBuffAllocator = ByteBuffAllocator.HEAP;

@@ -335,13 +319,6 @@ public class CacheConfig {
     return this.prefetchOnOpen;
   }
 
-  /**
-   * @return true if blocks should be cached while writing during compaction, false if not
-   */
-  public boolean shouldCacheCompactedBlocksOnWrite() {
-    return this.cacheCompactedDataOnWrite;
-  }
-
   /**
    * Return true if we may find this type of block in block cache.
    * <p>
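Read together, the removals in this file dismantle one small feature: a config key, its default, a field, the constructor wiring, and a getter. A condensed reconstruction of that now-removed plumbing, assembled from the deleted lines above (CacheConfigSketch is a hypothetical name; this is not current code):

    import org.apache.hadoop.conf.Configuration;

    // Sketch: the reverted CacheConfig plumbing, condensed from the deletions above.
    public class CacheConfigSketch {
      public static final String CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY =
          "hbase.rs.cachecompactedblocksonwrite";
      public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false;

      // Whether data blocks should be cached when a compacted file is written.
      private final boolean cacheCompactedDataOnWrite;

      public CacheConfigSketch(Configuration conf) {
        this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY,
            DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE);
      }

      // Consulted by HStore when it opened a writer for a compaction.
      public boolean shouldCacheCompactedBlocksOnWrite() {
        return cacheCompactedDataOnWrite;
      }
    }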

HStore.java

@@ -1118,9 +1118,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       boolean shouldDropBehind) throws IOException {
     final CacheConfig writerCacheConf;
     if (isCompaction) {
-      // Don't cache data on write on compactions, unless specifically configured to do so
+      // Don't cache data on write on compactions.
       writerCacheConf = new CacheConfig(cacheConf);
-      writerCacheConf.setCacheDataOnWrite(cacheConf.shouldCacheCompactedBlocksOnWrite());
+      writerCacheConf.setCacheDataOnWrite(false);
     } else {
       writerCacheConf = cacheConf;
     }
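With the revert in place, a compaction writer always runs with cache-on-write disabled, so blocks from freshly compacted files re-enter the block cache only through later reads or through prefetch-on-open (the PREFETCH_BLOCKS_ON_OPEN_KEY visible in the CacheConfig context above). A minimal sketch of the prefetch alternative, assuming the standard Configuration API (the key is normally set in hbase-site.xml or per column family):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // With compaction cache-on-write reverted, prefetching blocks when files
    // are opened is one remaining way to warm the cache after a compaction.
    conf.setBoolean("hbase.rs.prefetchblocksonopen", true);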

TestCacheOnWrite.java

@@ -53,8 +53,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -407,82 +405,59 @@ public class TestCacheOnWrite {
     storeFilePath = sfw.getPath();
   }
 
-  private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, boolean cacheBlocksOnCompaction)
-      throws IOException, InterruptedException {
-    // create a localConf
-    Configuration localConf = new Configuration(conf);
-    try {
-      // Set the conf if testing caching compacted blocks on write
-      conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, cacheBlocksOnCompaction);
-
-      // TODO: need to change this test if we add a cache size threshold for
-      // compactions, or if we implement some other kind of intelligent logic for
-      // deciding what blocks to cache-on-write on compaction.
-      final String table = "CompactionCacheOnWrite";
-      final String cf = "myCF";
-      final byte[] cfBytes = Bytes.toBytes(cf);
-      final int maxVersions = 3;
-      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfBytes)
-          .setCompressionType(compress).setBloomFilterType(BLOOM_TYPE).setMaxVersions(maxVersions)
-          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()).build();
-      HRegion region = TEST_UTIL.createTestRegion(table, cfd, blockCache);
-      int rowIdx = 0;
-      long ts = EnvironmentEdgeManager.currentTime();
-      for (int iFile = 0; iFile < 5; ++iFile) {
-        for (int iRow = 0; iRow < 500; ++iRow) {
-          String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
-          Put p = new Put(Bytes.toBytes(rowStr));
-          ++rowIdx;
-          for (int iCol = 0; iCol < 10; ++iCol) {
-            String qualStr = "col" + iCol;
-            String valueStr = "value_" + rowStr + "_" + qualStr;
-            for (int iTS = 0; iTS < 5; ++iTS) {
-              if (useTags) {
-                Tag t = new ArrayBackedTag((byte) 1, "visibility");
-                Tag[] tags = new Tag[1];
-                tags[0] = t;
-                KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
-                    HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
-                p.add(kv);
-              } else {
-                p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
-              }
-            }
-          }
-          p.setDurability(Durability.ASYNC_WAL);
-          region.put(p);
-        }
-        region.flush(true);
-      }
-
-      clearBlockCache(blockCache);
-      assertEquals(0, blockCache.getBlockCount());
-
-      region.compact(false);
-      LOG.debug("compactStores() returned");
-
-      boolean dataBlockCached = false;
-      for (CachedBlock block : blockCache) {
-        if (BlockType.ENCODED_DATA.equals(block.getBlockType())
-            || BlockType.DATA.equals(block.getBlockType())) {
-          dataBlockCached = true;
-          break;
-        }
-      }
-
-      // Data blocks should be cached in instances where we are caching blocks on write. In the case
-      // of testing
-      // BucketCache, we cannot verify block type as it is not stored in the cache.
-      assertTrue(
-        "\nTest description: " + testDescription + "\nprefetchCompactedBlocksOnWrite: "
-            + cacheBlocksOnCompaction + "\n",
-        (cacheBlocksOnCompaction && !(blockCache instanceof BucketCache)) == dataBlockCached);
-
-      region.close();
-    } finally {
-      // reset back
-      conf = new Configuration(localConf);
-    }
+  private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
+      throws IOException, InterruptedException {
+    // TODO: need to change this test if we add a cache size threshold for
+    // compactions, or if we implement some other kind of intelligent logic for
+    // deciding what blocks to cache-on-write on compaction.
+    final String table = "CompactionCacheOnWrite";
+    final String cf = "myCF";
+    final byte[] cfBytes = Bytes.toBytes(cf);
+    final int maxVersions = 3;
+    ColumnFamilyDescriptor cfd =
+        ColumnFamilyDescriptorBuilder.newBuilder(cfBytes).setCompressionType(compress)
+            .setBloomFilterType(BLOOM_TYPE).setMaxVersions(maxVersions)
+            .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()).build();
+    HRegion region = TEST_UTIL.createTestRegion(table, cfd, blockCache);
+    int rowIdx = 0;
+    long ts = EnvironmentEdgeManager.currentTime();
+    for (int iFile = 0; iFile < 5; ++iFile) {
+      for (int iRow = 0; iRow < 500; ++iRow) {
+        String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" +
+            iRow;
+        Put p = new Put(Bytes.toBytes(rowStr));
+        ++rowIdx;
+        for (int iCol = 0; iCol < 10; ++iCol) {
+          String qualStr = "col" + iCol;
+          String valueStr = "value_" + rowStr + "_" + qualStr;
+          for (int iTS = 0; iTS < 5; ++iTS) {
+            if (useTags) {
+              Tag t = new ArrayBackedTag((byte) 1, "visibility");
+              Tag[] tags = new Tag[1];
+              tags[0] = t;
+              KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
+                  HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
+              p.add(kv);
+            } else {
+              p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
+            }
+          }
+        }
+        p.setDurability(Durability.ASYNC_WAL);
+        region.put(p);
+      }
+      region.flush(true);
+    }
+
+    clearBlockCache(blockCache);
+    assertEquals(0, blockCache.getBlockCount());
+
+    region.compact(false);
+    LOG.debug("compactStores() returned");
+
+    for (CachedBlock block: blockCache) {
+      assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
+      assertNotEquals(BlockType.DATA, block.getBlockType());
+    }
+    region.close();
   }
 
   @Test
@@ -492,8 +467,8 @@ public class TestCacheOnWrite {
   }
 
   @Test
-  public void testCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
-    testCachingDataBlocksDuringCompactionInternals(false, false);
-    testCachingDataBlocksDuringCompactionInternals(true, true);
+  public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
+    testNotCachingDataBlocksDuringCompactionInternals(false);
+    testNotCachingDataBlocksDuringCompactionInternals(true);
   }
 }
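The restored test's contract, in short: after loading, flushing, and compacting the region, the block cache must contain no data blocks at all. A condensed sketch of that verification step, reusing the fixture names (blockCache, region) from the test above rather than standing alone:

    // Condensed from testNotCachingDataBlocksDuringCompactionInternals above.
    clearBlockCache(blockCache);
    assertEquals(0, blockCache.getBlockCount());
    region.compact(false);
    for (CachedBlock block : blockCache) {
      // Compactions no longer cache-on-write, so no data blocks may appear.
      assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
      assertNotEquals(BlockType.DATA, block.getBlockType());
    }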