diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index ae871c4b7f5..69d8521c3ec 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -149,14 +149,13 @@ public class MemcachedBlockCache implements BlockCache { // Update stats if this request doesn't have it turned off 100% of the time if (updateCacheMetrics) { if (result == null) { - cacheStats.miss(caching, cacheKey.isPrimary()); + cacheStats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); } else { - cacheStats.hit(caching, cacheKey.isPrimary()); + cacheStats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); } } } - return result; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 9693bbac33f..b0d8c248283 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -297,6 +297,29 @@ public interface MetricsRegionServerSource extends BaseSource { String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + "insertion failed. 
Usually due to size restrictions."; + String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount"; + String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = "blockCacheEncodedDataMissCount"; + String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount"; + String BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT = "blockCacheBloomChunkMissCount"; + String BLOCK_CACHE_META_MISS_COUNT = "blockCacheMetaMissCount"; + String BLOCK_CACHE_ROOT_INDEX_MISS_COUNT = "blockCacheRootIndexMissCount"; + String BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT = "blockCacheIntermediateIndexMissCount"; + String BLOCK_CACHE_FILE_INFO_MISS_COUNT = "blockCacheFileInfoMissCount"; + String BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT = "blockCacheGeneralBloomMetaMissCount"; + String BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT = "blockCacheDeleteFamilyBloomMissCount"; + String BLOCK_CACHE_TRAILER_MISS_COUNT = "blockCacheTrailerMissCount"; + String BLOCK_CACHE_DATA_HIT_COUNT = "blockCacheDataHitCount"; + String BLOCK_CACHE_ENCODED_DATA_HIT_COUNT = "blockCacheEncodedDataHitCount"; + String BLOCK_CACHE_LEAF_INDEX_HIT_COUNT = "blockCacheLeafIndexHitCount"; + String BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT = "blockCacheBloomChunkHitCount"; + String BLOCK_CACHE_META_HIT_COUNT = "blockCacheMetaHitCount"; + String BLOCK_CACHE_ROOT_INDEX_HIT_COUNT = "blockCacheRootIndexHitCount"; + String BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT = "blockCacheIntermediateIndexHitCount"; + String BLOCK_CACHE_FILE_INFO_HIT_COUNT = "blockCacheFileInfoHitCount"; + String BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT = "blockCacheGeneralBloomMetaHitCount"; + String BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT = "blockCacheDeleteFamilyBloomHitCount"; + String BLOCK_CACHE_TRAILER_HIT_COUNT = "blockCacheTrailerHitCount"; + String RS_START_TIME_NAME = "regionServerStartTime"; String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum"; String SERVER_NAME_NAME = "serverName"; diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index 5ecda04341d..cf0c7ac4732 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -417,4 +417,44 @@ public interface MetricsRegionServerWrapper { * Get the average region size to this region server. */ long getAverageRegionSize(); + + long getDataMissCount(); + + long getLeafIndexMissCount(); + + long getBloomChunkMissCount(); + + long getMetaMissCount(); + + long getRootIndexMissCount(); + + long getIntermediateIndexMissCount(); + + long getFileInfoMissCount(); + + long getGeneralBloomMetaMissCount(); + + long getDeleteFamilyBloomMissCount(); + + long getTrailerMissCount(); + + long getDataHitCount(); + + long getLeafIndexHitCount(); + + long getBloomChunkHitCount(); + + long getMetaHitCount(); + + long getRootIndexHitCount(); + + long getIntermediateIndexHitCount(); + + long getFileInfoHitCount(); + + long getGeneralBloomMetaHitCount(); + + long getDeleteFamilyBloomHitCount(); + + long getTrailerHitCount(); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index c625d49dff1..0fff5dbea9e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -377,6 +377,7 @@ public class MetricsRegionServerSourceImpl rsWrap.getCompactionQueueSize()) .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), rsWrap.getFlushQueueSize()) + 
.addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), rsWrap.getBlockCacheFreeSize()) .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), @@ -401,6 +402,41 @@ public class MetricsRegionServerSourceImpl BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC),rsWrap.getBlockCacheFailedInsertions()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), + rsWrap.getLeafIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), + rsWrap.getBloomChunkMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), + rsWrap.getRootIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), + rsWrap.getIntermediateIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), + rsWrap.getFileInfoMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), + rsWrap.getGeneralBloomMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), + rsWrap.getDeleteFamilyBloomMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), + rsWrap.getTrailerMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), + rsWrap.getLeafIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), + rsWrap.getBloomChunkHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), + rsWrap.getRootIndexHitCount()) + 
.addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), + rsWrap.getIntermediateIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), + rsWrap.getFileInfoHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), + rsWrap.getGeneralBloomMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), + rsWrap.getDeleteFamilyBloomHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount()) .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), rsWrap.getUpdatesBlockedTime()) .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), @@ -415,7 +451,6 @@ public class MetricsRegionServerSourceImpl rsWrap.getCompactedCellsSize()) .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), rsWrap.getMajorCompactedCellsSize()) - .addCounter( Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob()) @@ -453,7 +488,6 @@ public class MetricsRegionServerSourceImpl rsWrap.getHedgedReadWins()) .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), rsWrap.getBlockedRequestsCount()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), rsWrap.getZookeeperQuorum()) .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 180cbb484e9..64405de28e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -30,6 +30,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { private static final long serialVersionUID = -5199992013113130534L; private final String 
hfileName; private final long offset; + private final BlockType blockType; private final boolean isPrimaryReplicaBlock; /** @@ -38,13 +39,14 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { * @param offset Offset of the block into the file */ public BlockCacheKey(String hfileName, long offset) { - this(hfileName, offset, true); + this(hfileName, offset, true, BlockType.DATA); } - public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica) { + public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, BlockType blockType) { this.isPrimaryReplicaBlock = isPrimaryReplica; this.hfileName = hfileName; this.offset = offset; + this.blockType = blockType; } @Override @@ -69,9 +71,12 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { return String.format("%s_%d", hfileName, offset); } - public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT +Bytes.SIZEOF_BOOLEAN + - ClassSize.REFERENCE + // this.hfileName - Bytes.SIZEOF_LONG); // this.offset + public static final long FIXED_OVERHEAD = ClassSize.align( + ClassSize.OBJECT + + Bytes.SIZEOF_BOOLEAN + + ClassSize.REFERENCE + // this.hfileName + ClassSize.REFERENCE + // this.blockType + Bytes.SIZEOF_LONG); // this.offset /** * Strings have two bytes per character due to default Java Unicode encoding @@ -98,4 +103,8 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { public long getOffset() { return offset; } + + public BlockType getBlockType() { + return blockType; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index c2083882510..8de2a033fd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -74,6 +74,29 @@ public class CacheStats { /** The total number of 
blocks that were not inserted. */ private final AtomicLong failedInserts = new AtomicLong(0); + /** Per Block Type Counts */ + private final Counter dataMissCount = new Counter(0); + private final Counter leafIndexMissCount = new Counter(0); + private final Counter bloomChunkMissCount = new Counter(0); + private final Counter metaMissCount = new Counter(0); + private final Counter rootIndexMissCount = new Counter(0); + private final Counter intermediateIndexMissCount = new Counter(0); + private final Counter fileInfoMissCount = new Counter(0); + private final Counter generalBloomMetaMissCount = new Counter(0); + private final Counter deleteFamilyBloomMissCount = new Counter(0); + private final Counter trailerMissCount = new Counter(0); + + private final Counter dataHitCount = new Counter(0); + private final Counter leafIndexHitCount = new Counter(0); + private final Counter bloomChunkHitCount = new Counter(0); + private final Counter metaHitCount = new Counter(0); + private final Counter rootIndexHitCount = new Counter(0); + private final Counter intermediateIndexHitCount = new Counter(0); + private final Counter fileInfoHitCount = new Counter(0); + private final Counter generalBloomMetaHitCount = new Counter(0); + private final Counter deleteFamilyBloomHitCount = new Counter(0); + private final Counter trailerHitCount = new Counter(0); + /** The number of metrics periods to include in window */ private final int numPeriodsInWindow; /** Hit counts for each period in window */ @@ -125,20 +148,99 @@ public class CacheStats { ", evictedAgeMean=" + snapshot.getMean(); } - public void miss(boolean caching, boolean primary) { + + public void miss(boolean caching, boolean primary, BlockType type) { missCount.increment(); if (primary) primaryMissCount.increment(); if (caching) missCachingCount.increment(); + if (type == null) { + return; + } + switch (type) { + case DATA: + case ENCODED_DATA: + dataMissCount.increment(); + break; + case LEAF_INDEX: + 
leafIndexMissCount.increment(); + break; + case BLOOM_CHUNK: + bloomChunkMissCount.increment(); + break; + case META: + metaMissCount.increment(); + break; + case INTERMEDIATE_INDEX: + intermediateIndexMissCount.increment(); + break; + case ROOT_INDEX: + rootIndexMissCount.increment(); + break; + case FILE_INFO: + fileInfoMissCount.increment(); + break; + case GENERAL_BLOOM_META: + generalBloomMetaMissCount.increment(); + break; + case DELETE_FAMILY_BLOOM_META: + deleteFamilyBloomMissCount.increment(); + break; + case TRAILER: + trailerMissCount.increment(); + break; + default: + // If there's a new type that's fine + // Ignore it for now; these are metrics, so don't throw an exception. + break; + } } - public void hit(boolean caching) { - hit(caching, true); - } - - public void hit(boolean caching, boolean primary) { + public void hit(boolean caching, boolean primary, BlockType type) { hitCount.increment(); if (primary) primaryHitCount.increment(); if (caching) hitCachingCount.increment(); + + + if (type == null) { + return; + } + switch (type) { + case DATA: + case ENCODED_DATA: + dataHitCount.increment(); + break; + case LEAF_INDEX: + leafIndexHitCount.increment(); + break; + case BLOOM_CHUNK: + bloomChunkHitCount.increment(); + break; + case META: + metaHitCount.increment(); + break; + case INTERMEDIATE_INDEX: + intermediateIndexHitCount.increment(); + break; + case ROOT_INDEX: + rootIndexHitCount.increment(); + break; + case FILE_INFO: + fileInfoHitCount.increment(); + break; + case GENERAL_BLOOM_META: + generalBloomMetaHitCount.increment(); + break; + case DELETE_FAMILY_BLOOM_META: + deleteFamilyBloomHitCount.increment(); + break; + case TRAILER: + trailerHitCount.increment(); + break; + default: + // If there's a new type that's fine + // Ignore it for now; these are metrics, so don't throw an exception. + break; + } } public void evict() { @@ -157,6 +259,88 @@ public class CacheStats { return failedInserts.incrementAndGet(); } + + // All of the counts of misses and hits. 
+ public long getDataMissCount() { + return dataMissCount.get(); + } + + public long getLeafIndexMissCount() { + return leafIndexMissCount.get(); + } + + public long getBloomChunkMissCount() { + return bloomChunkMissCount.get(); + } + + public long getMetaMissCount() { + return metaMissCount.get(); + } + + public long getRootIndexMissCount() { + return rootIndexMissCount.get(); + } + + public long getIntermediateIndexMissCount() { + return intermediateIndexMissCount.get(); + } + + public long getFileInfoMissCount() { + return fileInfoMissCount.get(); + } + + public long getGeneralBloomMetaMissCount() { + return generalBloomMetaMissCount.get(); + } + + public long getDeleteFamilyBloomMissCount() { + return deleteFamilyBloomMissCount.get(); + } + + public long getTrailerMissCount() { + return trailerMissCount.get(); + } + + public long getDataHitCount() { + return dataHitCount.get(); + } + + public long getLeafIndexHitCount() { + return leafIndexHitCount.get(); + } + + public long getBloomChunkHitCount() { + return bloomChunkHitCount.get(); + } + + public long getMetaHitCount() { + return metaHitCount.get(); + } + + public long getRootIndexHitCount() { + return rootIndexHitCount.get(); + } + + public long getIntermediateIndexHitCount() { + return intermediateIndexHitCount.get(); + } + + public long getFileInfoHitCount() { + return fileInfoHitCount.get(); + } + + public long getGeneralBloomMetaHitCount() { + return generalBloomMetaHitCount.get(); + } + + public long getDeleteFamilyBloomHitCount() { + return deleteFamilyBloomHitCount.get(); + } + + public long getTrailerHitCount() { + return trailerHitCount.get(); + } + public long getRequestCount() { return getHitCount() + getMissCount(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 666b357265a..15e226f2682 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -138,6 +138,112 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { this.bucketCacheStats = fcStats; } + @Override + public long getDataMissCount() { + return lruCacheStats.getDataMissCount() + bucketCacheStats.getDataMissCount(); + } + + @Override + public long getLeafIndexMissCount() { + return lruCacheStats.getLeafIndexMissCount() + bucketCacheStats.getLeafIndexMissCount(); + } + + @Override + public long getBloomChunkMissCount() { + return lruCacheStats.getBloomChunkMissCount() + bucketCacheStats.getBloomChunkMissCount(); + } + + @Override + public long getMetaMissCount() { + return lruCacheStats.getMetaMissCount() + bucketCacheStats.getMetaMissCount(); + } + + @Override + public long getRootIndexMissCount() { + return lruCacheStats.getRootIndexMissCount() + bucketCacheStats.getRootIndexMissCount(); + } + + @Override + public long getIntermediateIndexMissCount() { + return lruCacheStats.getIntermediateIndexMissCount() + + bucketCacheStats.getIntermediateIndexMissCount(); + } + + @Override + public long getFileInfoMissCount() { + return lruCacheStats.getFileInfoMissCount() + bucketCacheStats.getFileInfoMissCount(); + } + + @Override + public long getGeneralBloomMetaMissCount() { + return lruCacheStats.getGeneralBloomMetaMissCount() + + bucketCacheStats.getGeneralBloomMetaMissCount(); + } + + @Override + public long getDeleteFamilyBloomMissCount() { + return lruCacheStats.getDeleteFamilyBloomMissCount() + + bucketCacheStats.getDeleteFamilyBloomMissCount(); + } + + @Override + public long getTrailerMissCount() { + return lruCacheStats.getTrailerMissCount() + bucketCacheStats.getTrailerMissCount(); + } + + @Override + public long getDataHitCount() { + return lruCacheStats.getDataHitCount() + bucketCacheStats.getDataHitCount(); + } + + @Override + public long 
getLeafIndexHitCount() { + return lruCacheStats.getLeafIndexHitCount() + bucketCacheStats.getLeafIndexHitCount(); + } + + @Override + public long getBloomChunkHitCount() { + return lruCacheStats.getBloomChunkHitCount() + bucketCacheStats.getBloomChunkHitCount(); + } + + @Override + public long getMetaHitCount() { + return lruCacheStats.getMetaHitCount() + bucketCacheStats.getMetaHitCount(); + } + + @Override + public long getRootIndexHitCount() { + return lruCacheStats.getRootIndexHitCount() + bucketCacheStats.getRootIndexHitCount(); + } + + @Override + public long getIntermediateIndexHitCount() { + return lruCacheStats.getIntermediateIndexHitCount() + + bucketCacheStats.getIntermediateIndexHitCount(); + } + + @Override + public long getFileInfoHitCount() { + return lruCacheStats.getFileInfoHitCount() + bucketCacheStats.getFileInfoHitCount(); + } + + @Override + public long getGeneralBloomMetaHitCount() { + return lruCacheStats.getGeneralBloomMetaHitCount() + + bucketCacheStats.getGeneralBloomMetaHitCount(); + } + + @Override + public long getDeleteFamilyBloomHitCount() { + return lruCacheStats.getDeleteFamilyBloomHitCount() + + bucketCacheStats.getDeleteFamilyBloomHitCount(); + } + + @Override + public long getTrailerHitCount() { + return lruCacheStats.getTrailerHitCount() + bucketCacheStats.getTrailerHitCount(); + } + @Override public long getRequestCount() { return lruCacheStats.getRequestCount() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 506f08d0f6a..76fec06b6ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -1170,7 +1170,7 @@ public class HFileBlockIndex { if (cacheConf != null) { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); 
cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching, - beginOffset), blockForCaching); + beginOffset, true, blockForCaching.getBlockType()), blockForCaching); } // Add intermediate index block size diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index d71911f340e..0b1ac8379e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -359,7 +359,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { BlockCache blockCache = this.cacheConf.getBlockCache(); if (blockCache != null && block != null) { BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(), - block.getOffset(), this.isPrimaryReplicaReader()); + block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType()); blockCache.returnBlock(cacheKey, block); } } @@ -1425,7 +1425,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // Check cache for block. If found return. long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block); BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset, - this.isPrimaryReplicaReader()); + this.isPrimaryReplicaReader(), BlockType.META); cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory()); if (cacheConf.isBlockCacheEnabled()) { @@ -1475,7 +1475,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // from doing). 
BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset, - this.isPrimaryReplicaReader()); + this.isPrimaryReplicaReader(), expectedBlockType); boolean useLock = false; IdLock.Entry lockEntry = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 9ab46cf57ff..576974493c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -483,7 +483,9 @@ public class HFileWriterImpl implements HFile.Writer { */ private void doCacheOnWrite(long offset) { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); - cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(name, offset), cacheFormatBlock); + cacheConf.getBlockCache().cacheBlock( + new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), + cacheFormatBlock); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 68ce16c3064..c380318c48a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -438,7 +438,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { boolean updateCacheMetrics) { LruCachedBlock cb = map.get(cacheKey); if (cb == null) { - if (!repeat && updateCacheMetrics) stats.miss(caching, cacheKey.isPrimary()); + if (!repeat && updateCacheMetrics) { + stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + } // If there is another block cache then try and read there. // However if this is a retry ( second time in double checked locking ) // And it's already a miss then the l2 will also be a miss. 
@@ -453,7 +455,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } return null; } - if (updateCacheMetrics) stats.hit(caching, cacheKey.isPrimary()); + if (updateCacheMetrics) stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); cb.access(count.incrementAndGet()); return cb.getBuffer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 69c42c34ad8..6f0193fa60e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -405,7 +405,7 @@ public class BucketCache implements BlockCache, HeapSize { RAMQueueEntry re = ramCache.get(key); if (re != null) { if (updateCacheMetrics) { - cacheStats.hit(caching, key.isPrimary()); + cacheStats.hit(caching, key.isPrimary(), key.getBlockType()); } re.access(accessCount.incrementAndGet()); return re.getData(); @@ -430,7 +430,7 @@ public class BucketCache implements BlockCache, HeapSize { bucketEntry.deserializerReference(this.deserialiserMap)); long timeTaken = System.nanoTime() - start; if (updateCacheMetrics) { - cacheStats.hit(caching, key.isPrimary()); + cacheStats.hit(caching, key.isPrimary(), key.getBlockType()); cacheStats.ioHit(timeTaken); } if (cachedBlock.getMemoryType() == MemoryType.SHARED) { @@ -450,7 +450,7 @@ public class BucketCache implements BlockCache, HeapSize { } } if (!repeat && updateCacheMetrics) { - cacheStats.miss(caching, key.isPrimary()); + cacheStats.miss(caching, key.isPrimary(), key.getBlockType()); } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 6e6d07d71ae..eb2e57a9e0f 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -839,4 +839,162 @@ class MetricsRegionServerWrapperImpl return averageRegionSize; } + public long getDataMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getDataMissCount(); + } + + @Override + public long getLeafIndexMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getLeafIndexMissCount(); + } + + @Override + public long getBloomChunkMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getBloomChunkMissCount(); + } + + @Override + public long getMetaMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getMetaMissCount(); + } + + @Override + public long getRootIndexMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getRootIndexMissCount(); + } + + @Override + public long getIntermediateIndexMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getIntermediateIndexMissCount(); + } + + @Override + public long getFileInfoMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getFileInfoMissCount(); + } + + @Override + public long getGeneralBloomMetaMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getGeneralBloomMetaMissCount(); + } + + @Override + public long getDeleteFamilyBloomMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getDeleteFamilyBloomMissCount(); + } + + @Override + public long getTrailerMissCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getTrailerMissCount(); + } + + @Override + public long getDataHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getDataHitCount(); + } + + @Override + public long 
getLeafIndexHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getLeafIndexHitCount(); + } + + @Override + public long getBloomChunkHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getBloomChunkHitCount(); + } + + @Override + public long getMetaHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getMetaHitCount(); + } + + @Override + public long getRootIndexHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getRootIndexHitCount(); + } + + @Override + public long getIntermediateIndexHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getIntermediateIndexHitCount(); + } + + @Override + public long getFileInfoHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getFileInfoHitCount(); + } + + @Override + public long getGeneralBloomMetaHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getGeneralBloomMetaHitCount(); + } + + @Override + public long getDeleteFamilyBloomHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getDeleteFamilyBloomHitCount(); + } + + @Override + public long getTrailerHitCount() { + if (this.cacheStats == null) { + return 0; + } + return cacheStats.getTrailerHitCount(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java index 50bf33132ce..8041dff5ecd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java @@ -38,11 +38,11 @@ public class TestCombinedBlockCache { // period 1: // lru cache: 1 hit caching, 1 miss caching // bucket cache: 2 hit non-caching,1 miss non-caching/primary,1 fail insert - lruCacheStats.hit(true); - 
lruCacheStats.miss(true, false); - bucketCacheStats.hit(false); - bucketCacheStats.hit(false); - bucketCacheStats.miss(false, true); + lruCacheStats.hit(true, true, BlockType.DATA); + lruCacheStats.miss(true, false, BlockType.DATA); + bucketCacheStats.hit(false,true, BlockType.DATA); + bucketCacheStats.hit(false,true, BlockType.DATA); + bucketCacheStats.miss(false, true, BlockType.DATA); assertEquals(5, stats.getRequestCount()); assertEquals(2, stats.getRequestCachingCount()); @@ -84,9 +84,9 @@ public class TestCombinedBlockCache { // period 2: // lru cache: 3 hit caching - lruCacheStats.hit(true); - lruCacheStats.hit(true); - lruCacheStats.hit(true); + lruCacheStats.hit(true, true, BlockType.DATA); + lruCacheStats.hit(true, true, BlockType.DATA); + lruCacheStats.hit(true, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(6, stats.getSumHitCountsPastNPeriods()); assertEquals(8, stats.getSumRequestCountsPastNPeriods()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 4c0f98f66ee..d7f9aba6a91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -660,48 +660,48 @@ public class TestLruBlockCache { // period 1, 1 hit caching, 1 hit non-caching, 2 miss non-caching // should be (2/4)=0.5 and (1/1)=1 - stats.hit(false); - stats.hit(true); - stats.miss(false, false); - stats.miss(false, false); + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); assertEquals(1.0, stats.getHitCachingRatioPastNPeriods(), delta); // period 2, 1 miss caching, 3 miss non-caching // should be (2/8)=0.25 and 
(1/2)=0.5 - stats.miss(true, false); - stats.miss(false, false); - stats.miss(false, false); - stats.miss(false, false); + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.25, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); // period 3, 2 hits of each type // should be (6/12)=0.5 and (3/4)=0.75 - stats.hit(false); - stats.hit(true); - stats.hit(false); - stats.hit(true); + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.75, stats.getHitCachingRatioPastNPeriods(), delta); // period 4, evict period 1, two caching misses // should be (4/10)=0.4 and (2/5)=0.4 - stats.miss(true, false); - stats.miss(true, false); + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.4, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.4, stats.getHitCachingRatioPastNPeriods(), delta); // period 5, evict period 2, 2 caching misses, 2 non-caching hit // should be (6/10)=0.6 and (2/6)=1/3 - stats.miss(true, false); - stats.miss(true, false); - stats.hit(false); - stats.hit(false); + stats.miss(true, false, BlockType.DATA); + stats.miss(true, false, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta); @@ -726,10 +726,10 @@ public class TestLruBlockCache { // period 9, one of each // should be (2/4)=0.5 and (1/2)=0.5 - stats.miss(true, false); - stats.miss(false, false); - 
stats.hit(true); - stats.hit(false); + stats.miss(true, false, BlockType.DATA); + stats.miss(false, false, BlockType.DATA); + stats.hit(true, true, BlockType.DATA); + stats.hit(false, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.5, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.5, stats.getHitCachingRatioPastNPeriods(), delta); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java index 9aa49ba09e5..6e4828cfd92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java @@ -330,6 +330,106 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe return 0; } + @Override + public long getDataMissCount() { + return 0; + } + + @Override + public long getLeafIndexMissCount() { + return 0; + } + + @Override + public long getBloomChunkMissCount() { + return 0; + } + + @Override + public long getMetaMissCount() { + return 0; + } + + @Override + public long getRootIndexMissCount() { + return 0; + } + + @Override + public long getIntermediateIndexMissCount() { + return 0; + } + + @Override + public long getFileInfoMissCount() { + return 0; + } + + @Override + public long getGeneralBloomMetaMissCount() { + return 0; + } + + @Override + public long getDeleteFamilyBloomMissCount() { + return 0; + } + + @Override + public long getTrailerMissCount() { + return 0; + } + + @Override + public long getDataHitCount() { + return 0; + } + + @Override + public long getLeafIndexHitCount() { + return 0; + } + + @Override + public long getBloomChunkHitCount() { + return 0; + } + + @Override + public long getMetaHitCount() { + return 0; + } + + @Override + public long getRootIndexHitCount() { + return 0; + } + + @Override + 
public long getIntermediateIndexHitCount() { + return 0; + } + + @Override + public long getFileInfoHitCount() { + return 0; + } + + @Override + public long getGeneralBloomMetaHitCount() { + return 0; + } + + @Override + public long getDeleteFamilyBloomHitCount() { + return 0; + } + + @Override + public long getTrailerHitCount() { + return 0; + } + @Override public int getSplitQueueSize() { return 0;