HBASE-15385 PREFETCH_BLOCKS_ON_OPEN in HColumnDescriptor is ignored

stack 2016-04-20 09:38:30 -07:00
parent 3e1bdccc53
commit fa215a67e2
4 changed files with 26 additions and 5 deletions


@@ -246,11 +246,14 @@ public class CacheConfig {
HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
);
LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this);
}
/**
* Create a cache configuration using the specified configuration object and
* defaults for family level settings.
* defaults for family level settings. Only use if no column family context. Prefer
* {@link CacheConfig#CacheConfig(Configuration, HColumnDescriptor)}
* @see #CacheConfig(Configuration, HColumnDescriptor)
* @param conf hbase configuration
*/
public CacheConfig(Configuration conf) {
@@ -268,6 +271,7 @@ public class CacheConfig {
HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1),
conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
);
LOG.info("Created cacheConfig: " + this);
}
/**
@@ -303,7 +307,6 @@ public class CacheConfig {
this.prefetchOnOpen = prefetchOnOpen;
this.cacheDataInL1 = cacheDataInL1;
this.dropBehindCompaction = dropBehindCompaction;
LOG.info(this);
}
/**
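The javadoc change above steers callers toward the family-aware constructor, which is what lets a column family's PREFETCH_BLOCKS_ON_OPEN flag take effect. A minimal sketch of the difference between the two constructors, using only the API visible in this diff; the class name PrefetchCacheConfigExample and the family name "f" are illustrative, not part of the commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefetchCacheConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Family-level flag, set the same way as in testPrefetchSetInHCDWorks below.
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
    family.setPrefetchBlocksOnOpen(true);

    // Preferred: the column family's settings are consulted alongside hbase-site.xml.
    CacheConfig withFamily = new CacheConfig(conf, family);
    System.out.println("with family: " + withFamily.shouldPrefetchOnOpen()); // expected: true

    // Config-only constructor: only site-wide defaults are visible, so the
    // family's prefetch flag cannot be picked up here.
    CacheConfig siteOnly = new CacheConfig(conf);
    System.out.println("config only: " + siteOnly.shouldPrefetchOnOpen()); // expected: false unless set in hbase-site.xml
  }
}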


@@ -253,7 +253,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
try {
end = getTrailer().getLoadOnOpenDataOffset();
if (LOG.isTraceEnabled()) {
LOG.trace("File=" + path.toString() + ", offset=" + offset + ", end=" + end);
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end);
}
// TODO: Could we use block iterator in here? Would that get stuff into the cache?
HFileBlock prevBlock = null;
@@ -279,11 +279,11 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
} catch (IOException e) {
// IOExceptions are probably due to region closes (relocation, etc.)
if (LOG.isTraceEnabled()) {
LOG.trace("File=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
}
} catch (Exception e) {
// Other exceptions are interesting
LOG.warn("File=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
LOG.warn("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
} finally {
PrefetchExecutor.complete(path);
}


@@ -27,6 +27,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -60,6 +63,16 @@ public class TestPrefetch {
cacheConf = new CacheConfig(conf);
}
@Test
public void testPrefetchSetInHCDWorks() {
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("f"));
hcd.setPrefetchBlocksOnOpen(true);
Configuration c = HBaseConfiguration.create();
assertFalse(c.getBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false));
CacheConfig cc = new CacheConfig(c, hcd);
assertTrue(cc.shouldPrefetchOnOpen());
}
@Test(timeout=60000)
public void testPrefetch() throws Exception {
Path storeFile = writeStoreFile();


@@ -207,6 +207,11 @@ tableDesc.addFamily(cfDesc);
See the API documentation for
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html[CacheConfig].
To see prefetch in operation, enable TRACE level logging on
`org.apache.hadoop.hbase.io.hfile.HFileReaderImpl` in hbase-2.0+
or on `org.apache.hadoop.hbase.io.hfile.HFileReaderV2` in earlier versions of HBase (hbase-1.x).
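The `Prefetch=` trace messages added to HFileReaderImpl above are what this TRACE logging surfaces. A minimal sketch of raising that logger from inside a client or test JVM, assuming the log4j 1.x backend these HBase versions ship with; the class name EnablePrefetchTrace is illustrative, and the equivalent static setting would be a `log4j.logger.org.apache.hadoop.hbase.io.hfile.HFileReaderImpl=TRACE` line in log4j.properties:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class EnablePrefetchTrace {
  public static void main(String[] args) {
    // Make the "Prefetch=<path>, offset=..., end=..." lines visible in this JVM's log output.
    Logger.getLogger("org.apache.hadoop.hbase.io.hfile.HFileReaderImpl").setLevel(Level.TRACE);
  }
}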
[[perf.rs.memstore.size]]
=== `hbase.regionserver.global.memstore.size`