HBASE-26190 High rate logging of Bucket Allocator Allocation fails (#3648)
* HBASE-26190 High rate logging of BucketAllocatorException: Allocation too big

Reduce the frequency of allocation-failure traces by printing them periodically (at most once per minute). Record allocation failures in the BucketCache stats and let the stats thread dump the cumulative allocation-failure count alongside the other metrics it reports. This change also logs the table name, column family, and HFile name for the most recent allocation failure within the last minute.

Signed-off-by: Anoop <anoopsamjohn@apache.org>
parent 4d27c4726d
commit 891e90f08f
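The heart of the change is a simple time-based throttle: every allocation failure is counted in the cache stats, but the WARN line itself is emitted at most once per ALLOCATION_FAIL_LOG_TIME_PERIOD. The standalone sketch below illustrates that pattern outside of HBase; the class and method names (ThrottledFailureLogger, onFailure) are illustrative only and do not appear in the patch, which implements the same idea directly in BucketCache and BucketCacheStats.

import java.util.concurrent.atomic.LongAdder;

/**
 * Minimal sketch of the log-throttling pattern applied by the patch: every
 * failure is counted, but a warning line is printed at most once per period.
 * Names here are illustrative, not part of the actual HBase change.
 */
public class ThrottledFailureLogger {
  private static final long LOG_PERIOD_MS = 60_000; // once per minute, as in the patch
  private final LongAdder failCount = new LongAdder(); // cumulative count, dumped with stats
  private long prevLogTs = 0; // time of the previously logged failure

  public synchronized void onFailure(String detail) {
    failCount.increment(); // always recorded, whether or not we log
    long now = System.currentTimeMillis();
    if (prevLogTs == 0 || (now - prevLogTs) > LOG_PERIOD_MS) {
      System.err.println("Allocation failed; most recent detail: " + detail
        + " (total failures so far: " + failCount.sum() + ")");
      prevLogTs = now;
    }
  }

  public long getFailCount() {
    return failCount.sum();
  }

  public static void main(String[] args) {
    ThrottledFailureLogger logger = new ThrottledFailureLogger();
    for (int i = 0; i < 1000; i++) {
      logger.onFailure("block-" + i); // only the first call in each minute prints
    }
    System.out.println("failures recorded: " + logger.getFailCount()); // 1000
  }
}

Running the main method prints a single warning for the burst of 1000 failures while the counter still reports all of them, which mirrors how the stat thread later dumps allocationFailCount.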
@@ -2029,7 +2029,7 @@ public class HFileBlock implements Cacheable {
    * @return This HFileBlocks fileContext which will a derivative of the
    * fileContext for the file from which this block's data was originally read.
    */
-  HFileContext getHFileContext() {
+  public HFileContext getHFileContext() {
     return this.fileContext;
   }
@@ -67,9 +67,11 @@ import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.RefCnt;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef;
@@ -249,6 +251,10 @@ public class BucketCache implements BlockCache, HeapSize {
    * */
   private String algorithm;
 
+  /* Tracing failed Bucket Cache allocations. */
+  private long allocFailLogPrevTs; // time of previous log event for allocation failure.
+  private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute.
+
   public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
       int writerThreadNum, int writerQLen, String persistencePath) throws IOException {
     this(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen,
@@ -291,6 +297,8 @@ public class BucketCache implements BlockCache, HeapSize {
     this.blockSize = blockSize;
     this.ioErrorsTolerationDuration = ioErrorsTolerationDuration;
 
+    this.allocFailLogPrevTs = 0;
+
     bucketAllocator = new BucketAllocator(capacity, bucketSizes);
     for (int i = 0; i < writerThreads.length; ++i) {
       writerQueues.add(new ArrayBlockingQueue<>(writerQLen));
@@ -727,7 +735,8 @@ public class BucketCache implements BlockCache, HeapSize {
       (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) +
       "evictions=" + cacheStats.getEvictionCount() + ", " +
       "evicted=" + cacheStats.getEvictedCount() + ", " +
-      "evictedPerRun=" + cacheStats.evictedPerEviction());
+      "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " +
+      "allocationFailCount=" + cacheStats.getAllocationFailCount());
     cacheStats.reset();
   }
@@ -995,6 +1004,25 @@ public class BucketCache implements BlockCache, HeapSize {
     }
   }
 
+  /**
+   * Prepare and return a warning message for Bucket Allocator Exception
+   * @param re The RAMQueueEntry for which the exception was thrown.
+   * @return A warning message created from the input RAMQueueEntry object.
+   */
+  private String getAllocationFailWarningMessage(RAMQueueEntry re) {
+    if (re != null && re.getData() instanceof HFileBlock) {
+      HFileBlock block = (HFileBlock) re.getData();
+      HFileContext fileContext = block.getHFileContext();
+      String hFileName = fileContext.getHFileName();
+      String columnFamily = Bytes.toString(fileContext.getColumnFamily());
+      String tableName = Bytes.toString(fileContext.getTableName());
+      return ("Most recent failed allocation in " + ALLOCATION_FAIL_LOG_TIME_PERIOD
+        + " milliseconds; Table Name = " + tableName + ", Column Family = "
+        + columnFamily + ", HFile Name : " + hFileName);
+    }
+    return ("Failed allocation for " + (re == null ? "" : re.getKey()) + "; ");
+  }
+
   /**
    * Flush the entries in ramCache to IOEngine and add bucket entry to backingMap. Process all that
    * are passed in even if failure being sure to remove from ramCache else we'll never undo the
@@ -1040,7 +1068,12 @@ public class BucketCache implements BlockCache, HeapSize {
         }
         index++;
       } catch (BucketAllocatorException fle) {
-        LOG.warn("Failed allocation for " + (re == null ? "" : re.getKey()) + "; " + fle);
+        long currTs = System.currentTimeMillis(); // Current time since Epoch in milliseconds.
+        cacheStats.allocationFailed(); // Record the warning.
+        if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) {
+          LOG.warn(getAllocationFailWarningMessage(re), fle);
+          allocFailLogPrevTs = currTs;
+        }
         // Presume can't add. Too big? Move index on. Entry will be cleared from ramCache below.
         bucketEntries[index] = null;
         index++;
@@ -35,14 +35,20 @@ public class BucketCacheStats extends CacheStats {
   private static final long NANO_TIME = TimeUnit.MILLISECONDS.toNanos(1);
   private long lastLogTime = EnvironmentEdgeManager.currentTime();
 
+  /* Tracing failed Bucket Cache allocations. */
+  private LongAdder allocationFailCount = new LongAdder();
+
   BucketCacheStats() {
     super("BucketCache");
+
+    allocationFailCount.reset();
   }
 
   @Override
   public String toString() {
     return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() +
-      ", ioTimePerHit=" + getIOTimePerHit();
+      ", ioTimePerHit=" + getIOTimePerHit() + ", allocationFailCount=" +
+      getAllocationFailCount();
   }
 
   public void ioHit(long time) {
@@ -66,5 +72,14 @@ public class BucketCacheStats extends CacheStats {
   public void reset() {
     ioHitCount.reset();
     ioHitTime.reset();
+    allocationFailCount.reset();
+  }
+
+  public long getAllocationFailCount() {
+    return allocationFailCount.sum();
+  }
+
+  public void allocationFailed() {
+    allocationFailCount.increment();
   }
 }