HBASE-15640 L1 cache doesn't give fair warning that it is showing partial stats only when it hits limit
This commit is contained in:
parent
a331a57ef8
commit
71c55d182d
|
@ -305,7 +305,11 @@ are combined counts. Request count is sum of hits and misses.</p>
|
|||
}
|
||||
</%java>
|
||||
<%if cbsbf.isFull() %>
|
||||
<p><b>Statistics below are based on sampling the first <% cbsbfSnapshot.getMax() %> blocks only</b> (hbase.ui.blockcache.by.file.max)</p>
|
||||
<p>
|
||||
<div class="alert alert-danger">
|
||||
<strong>The stats below are incomplete!</strong> We ran into our accounting limit of <% cbsbf.getCount() %> blocks. Up the configuration <i>hbase.ui.blockcache.by.file.max</i>.
|
||||
</div>
|
||||
</p>
|
||||
</%if>
|
||||
<table id="blocks_summary" class="table table-striped">
|
||||
<tr>
|
||||
|
|
|
@ -179,16 +179,22 @@ public class BlockCacheUtil {
|
|||
private long size;
|
||||
private long dataSize;
|
||||
private final long now = System.nanoTime();
|
||||
/**
|
||||
* How many blocks to look at before we give up.
|
||||
* There could be many millions of blocks. We don't want the
|
||||
* ui to freeze while we run through 1B blocks... users will
|
||||
* think HBase is dead. The UI displays a warning in red when stats
|
||||
* are incomplete.
|
||||
*/
|
||||
private final int max;
|
||||
public static final int DEFAULT_MAX = 100000;
|
||||
|
||||
public static final int DEFAULT_MAX = 1000000;
|
||||
|
||||
CachedBlocksByFile() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
CachedBlocksByFile(final Configuration c) {
|
||||
this.max = c == null? DEFAULT_MAX:
|
||||
c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
|
||||
this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -230,7 +236,7 @@ public class BlockCacheUtil {
|
|||
public boolean isFull() {
|
||||
return this.count >= this.max;
|
||||
}
|
||||
|
||||
|
||||
public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
|
||||
return this.cachedBlockByFile;
|
||||
}
|
||||
|
|
|
@ -195,7 +195,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
end = getTrailer().getLoadOnOpenDataOffset();
|
||||
HFileBlock prevBlock = null;
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end);
|
||||
LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
|
||||
}
|
||||
while (offset < end) {
|
||||
if (Thread.interrupted()) {
|
||||
|
@ -213,11 +213,14 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
} catch (IOException e) {
|
||||
// IOExceptions are probably due to region closes (relocation, etc.)
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
|
||||
LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
|
||||
}
|
||||
} catch (NullPointerException e) {
|
||||
LOG.warn("Stream moved/closed or prefetch cancelled?" +
|
||||
getPathOffsetEndStr(path, offset, end), e);
|
||||
} catch (Exception e) {
|
||||
// Other exceptions are interesting
|
||||
LOG.warn("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
|
||||
LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
|
||||
} finally {
|
||||
PrefetchExecutor.complete(path);
|
||||
}
|
||||
|
@ -235,6 +238,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
|
|||
.build();
|
||||
}
|
||||
|
||||
private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
|
||||
return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a Scanner on this file. No seeks or reads are done on creation. Call
|
||||
* {@link HFileScanner#seekTo(byte[])} to position an start the read. There is
|
||||
|
|
Loading…
Reference in New Issue