HBASE-15640 L1 cache doesn't give fair warning that it is showing partial stats only when it hits limit

This commit is contained in:
stack 2016-04-20 14:14:37 -07:00
parent fa215a67e2
commit 6dd938c20b
3 changed files with 26 additions and 9 deletions

View File

@ -305,7 +305,11 @@ are combined counts. Request count is sum of hits and misses.</p>
} }
</%java> </%java>
<%if cbsbf.isFull() %> <%if cbsbf.isFull() %>
<p><b>Statistics below is based on sampling first <% cbsbfSnapshot.getMax() %> blocks only</b> (hbase.ui.blockcache.by.file.max)</p> <p>
<div class="alert alert-danger">
<strong>The stats below are incomplete!</strong> We ran into our accounting limit of <% cbsbf.getCount() %> blocks. Up the configuration <i>hbase.ui.blockcache.by.file.max</i>.
</div>
</p>
</%if> </%if>
<table id="blocks_summary" class="table table-striped"> <table id="blocks_summary" class="table table-striped">
<tr> <tr>

View File

@ -180,16 +180,22 @@ public class BlockCacheUtil {
private long size; private long size;
private long dataSize; private long dataSize;
private final long now = System.nanoTime(); private final long now = System.nanoTime();
/**
* How many blocks to look at before we give up.
* There could be many millions of blocks. We don't want the
* UI to freeze while we run through 1B blocks... users will
* think HBase is dead. The UI displays a warning in red when
* stats are incomplete.
*/
private final int max; private final int max;
public static final int DEFAULT_MAX = 100000; public static final int DEFAULT_MAX = 1000000;
CachedBlocksByFile() { CachedBlocksByFile() {
this(null); this(null);
} }
CachedBlocksByFile(final Configuration c) { CachedBlocksByFile(final Configuration c) {
this.max = c == null? DEFAULT_MAX: this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
} }
/** /**
@ -231,7 +237,7 @@ public class BlockCacheUtil {
public boolean isFull() { public boolean isFull() {
return this.count >= this.max; return this.count >= this.max;
} }
public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() { public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
return this.cachedBlockByFile; return this.cachedBlockByFile;
} }

View File

@ -253,7 +253,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
try { try {
end = getTrailer().getLoadOnOpenDataOffset(); end = getTrailer().getLoadOnOpenDataOffset();
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end); LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
} }
// TODO: Could we use block iterator in here? Would that get stuff into the cache? // TODO: Could we use block iterator in here? Would that get stuff into the cache?
HFileBlock prevBlock = null; HFileBlock prevBlock = null;
@ -267,7 +267,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// cached block. This 'optimization' triggers extremely rarely I'd say. // cached block. This 'optimization' triggers extremely rarely I'd say.
long onDiskSize = prevBlock != null? prevBlock.getNextBlockOnDiskSize(): -1; long onDiskSize = prevBlock != null? prevBlock.getNextBlockOnDiskSize(): -1;
HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false,
null, null); null, null);
// Need not update the current block. Ideally here the readBlock won't find the // Need not update the current block. Ideally here the readBlock won't find the
// block in cache. We call this readBlock so that block data is read from FS and // block in cache. We call this readBlock so that block data is read from FS and
// cached in BC. So there is no reference count increment that happens here. // cached in BC. So there is no reference count increment that happens here.
@ -279,11 +279,14 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
} catch (IOException e) { } catch (IOException e) {
// IOExceptions are probably due to region closes (relocation, etc.) // IOExceptions are probably due to region closes (relocation, etc.)
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e); LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
} }
} catch (NullPointerException e) {
LOG.warn("Stream moved/closed or prefetch cancelled?" +
getPathOffsetEndStr(path, offset, end), e);
} catch (Exception e) { } catch (Exception e) {
// Other exceptions are interesting // Other exceptions are interesting
LOG.warn("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e); LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
} finally { } finally {
PrefetchExecutor.complete(path); PrefetchExecutor.complete(path);
} }
@ -302,6 +305,10 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
} }
} }
private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
}
/** /**
* File version check is a little sloppy. We read v3 files but can also read v2 files if their * File version check is a little sloppy. We read v3 files but can also read v2 files if their
* content has been pb'd; files written with 0.98. * content has been pb'd; files written with 0.98.