HBASE-4027 Off Heap Cache never creates Slabs
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1164674 13f79535-47bb-0310-9956-ffa450edef68
commit 7994ce3ea9
parent 58e75dd65c
CHANGES.txt
@@ -234,6 +234,7 @@ Release 0.91.0 - Unreleased
    HBASE-4273  java.lang.NullPointerException when a table is being disabled and
                HMaster restarts (Ming Ma)
    HBASE-4310  SlabCache metrics bugfix (Li Pi)
+   HBASE-4027  Off Heap Cache never creates Slabs (Li Pi)
 
 IMPROVEMENTS
    HBASE-3290  Max Compaction Size (Nicolas Spiegelberg via Stack)
DoubleBlockCache.java
@@ -45,7 +45,6 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
   private final SlabCache offHeapCache;
   private final CacheStats stats;
 
-
   /**
    * Default constructor. Specify maximum size and expected average block size
    * (approximation is fine).
@@ -53,26 +52,28 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
    * All other factors will be calculated based on defaults specified in this
    * class.
    *
-   * @param maxSize
-   *          maximum size of cache, in bytes
-   * @param blockSize
-   *          approximate size of each block, in bytes
+   * @param onHeapSize maximum size of the onHeapCache, in bytes.
+   * @param offHeapSize maximum size of the offHeapCache, in bytes.
+   * @param onHeapBlockSize average block size of the on heap cache.
+   * @param offHeapBlockSize average block size for the off heap cache
+   * @param conf configuration file. currently used only by the off heap cache.
    */
-  public DoubleBlockCache(long onHeapSize, long offHeapSize, long blockSizeLru,
-      long blockSizeSlab) {
+  public DoubleBlockCache(long onHeapSize, long offHeapSize,
+      long onHeapBlockSize, long offHeapBlockSize, Configuration conf) {
 
     LOG.info("Creating on-heap cache of size "
         + StringUtils.humanReadableInt(onHeapSize)
         + "bytes with an average block size of "
-        + StringUtils.humanReadableInt(blockSizeLru) + " bytes.");
-    onHeapCache = new LruBlockCache(onHeapSize, blockSizeLru);
+        + StringUtils.humanReadableInt(onHeapBlockSize) + " bytes.");
+    onHeapCache = new LruBlockCache(onHeapSize, onHeapBlockSize);
 
     LOG.info("Creating off-heap cache of size "
         + StringUtils.humanReadableInt(offHeapSize)
         + "bytes with an average block size of "
-        + StringUtils.humanReadableInt(blockSizeSlab) + " bytes.");
-    offHeapCache = new SlabCache(offHeapSize, blockSizeSlab);
+        + StringUtils.humanReadableInt(offHeapBlockSize) + " bytes.");
+    offHeapCache = new SlabCache(offHeapSize, offHeapBlockSize);
 
+    offHeapCache.addSlabByConf(conf);
     this.stats = new CacheStats();
   }
 
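For context, here is a minimal sketch (not part of the commit) of how a caller might construct the cache with the patched five-argument signature. The sizes, the package paths in the imports, and the example class itself are assumptions for illustration; only the constructor signature and the fact that the constructor now forwards conf to SlabCache.addSlabByConf come from the hunk above. Because the SlabCache allocates direct buffers, actually running something like this also assumes the JVM has been given enough direct memory (for example via -XX:MaxDirectMemorySize).

// Sketch only: wiring up the two-level cache with the patched constructor.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.DoubleBlockCache;

public class DoubleBlockCacheExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    long onHeapSize       = 256L * 1024 * 1024;  // capacity of the on-heap LruBlockCache (example value)
    long offHeapSize      = 1024L * 1024 * 1024; // capacity of the off-heap SlabCache (example value)
    long onHeapBlockSize  = 64L * 1024;          // expected average block size, on heap (example value)
    long offHeapBlockSize = 64L * 1024;          // expected average block size, off heap (example value)

    // The Configuration is now passed in so the SlabCache can create its slabs
    // via addSlabByConf(conf) -- the step that was missing before HBASE-4027.
    DoubleBlockCache cache = new DoubleBlockCache(onHeapSize, offHeapSize,
        onHeapBlockSize, offHeapBlockSize, conf);
    System.out.println("Built two-level block cache: " + cache);
  }
}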
StoreFile.java
@@ -223,7 +223,7 @@ public class StoreFile {
           "bloomType=" + bt + " (disabled in config)");
       this.bloomType = BloomType.NONE;
     }
 
     // cache the modification time stamp of this store file
     FileStatus[] stats = fs.listStatus(p);
     if (stats != null && stats.length == 1) {
@@ -384,7 +384,7 @@ public class StoreFile {
     if(offHeapCacheSize <= 0) {
       hfileBlockCache = new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL);
     } else {
-      hfileBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize, DEFAULT_BLOCKSIZE_SMALL, blockSize);
+      hfileBlockCache = new DoubleBlockCache(cacheSize, offHeapCacheSize, DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
     }
     return hfileBlockCache;
   }
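As a hedged illustration of the call site patched above, the sketch below mirrors the branch in StoreFile's block-cache setup: fall back to a plain on-heap LruBlockCache when no off-heap budget is configured, otherwise build the two-level DoubleBlockCache and thread the Configuration through. The helper class, the method name, and the DEFAULT_BLOCKSIZE_SMALL value are assumptions made for the example; only the branch condition and the extra conf argument are taken from the diff.

// Sketch only: the selection logic around the patched constructor call.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.DoubleBlockCache;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

class BlockCacheSelection {
  // Value assumed for the sketch; StoreFile defines its own constant.
  static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;

  static BlockCache chooseBlockCache(Configuration conf, long cacheSize,
      long offHeapCacheSize, long blockSize) {
    if (offHeapCacheSize <= 0) {
      // No off-heap budget configured: plain on-heap LRU cache, as before.
      return new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL);
    }
    // Off-heap budget present: two-level cache. conf must be threaded through
    // so the SlabCache underneath can create its slabs (the HBASE-4027 fix).
    return new DoubleBlockCache(cacheSize, offHeapCacheSize,
        DEFAULT_BLOCKSIZE_SMALL, blockSize, conf);
  }
}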
@@ -400,7 +400,7 @@ public class StoreFile {
   /**
    * @return the cached value of HDFS blocks distribution. The cached value is
    * calculated when store file is opened.
    */
   public HDFSBlocksDistribution getHDFSBlockDistribution() {
     return this.hdfsBlocksDistribution;
   }
@@ -417,17 +417,17 @@ public class StoreFile {
    * @param reference The reference
    * @param reference The referencePath
    * @return HDFS blocks distribution
    */
   static private HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
     FileSystem fs, Reference reference, Path referencePath) throws IOException {
     if ( referencePath == null) {
       return null;
     }
 
     FileStatus status = fs.getFileStatus(referencePath);
     long start = 0;
     long length = 0;
 
     if (Reference.isTopFileRegion(reference.getFileRegion())) {
       start = status.getLen()/2;
       length = status.getLen() - status.getLen()/2;
@@ -437,14 +437,14 @@ public class StoreFile {
     }
     return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
   }
 
   /**
    * helper function to compute HDFS blocks distribution of a given file.
    * For reference file, it is an estimate
    * @param fs The FileSystem
    * @param o The path of the file
    * @return HDFS blocks distribution
    */
   static public HDFSBlocksDistribution computeHDFSBlockDistribution(
     FileSystem fs, Path p) throws IOException {
     if (isReference(p)) {
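The two hunks above show the half-file estimate used for reference (split) store files: a top reference is credited with the second half of the parent file, starting at the midpoint. The bottom-reference branch is elided in the hunk, so the sketch below assumes it is symmetric (first half, starting at offset 0); it is plain arithmetic for illustration, not HBase API.

// Worked example of the top/bottom half arithmetic (standalone, no HBase types).
public class ReferenceHalfExample {
  public static void main(String[] args) {
    long fileLen = 1001;                     // parent HFile length in bytes (odd on purpose)

    // Top reference: starts at the midpoint, takes the remainder.
    long topStart  = fileLen / 2;            // 500
    long topLength = fileLen - fileLen / 2;  // 501

    // Bottom reference (assumed symmetric): starts at 0, takes the first half.
    long bottomStart  = 0;
    long bottomLength = fileLen / 2;         // 500

    System.out.println("top:    start=" + topStart + " length=" + topLength);
    System.out.println("bottom: start=" + bottomStart + " length=" + bottomLength);
    // Together the two halves cover the whole file: 500 + 501 = 1001 bytes.
  }
}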
@@ -457,8 +457,8 @@ public class StoreFile {
       return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, length);
     }
   }
 
 
   /**
    * compute HDFS block distribution, for reference file, it is an estimate
    */
@@ -473,7 +473,7 @@ public class StoreFile {
         this.fs, status, 0, length);
     }
   }
 
   /**
    * Opens reader on this store file. Called by Constructor.
    * @return Reader for the store file.
@@ -492,9 +492,9 @@ public class StoreFile {
         this.inMemory,
         this.conf.getBoolean(HFile.EVICT_BLOCKS_ON_CLOSE_KEY, true));
     }
 
     computeHDFSBlockDistribution();
 
     // Load up indices and fileinfo.
     metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());
     // Read in our metadata.
@@ -950,8 +950,8 @@ public class StoreFile {
     public Path getPath() {
       return this.writer.getPath();
     }
 
     boolean hasBloom() {
       return this.bloomFilterWriter != null;
     }
 