HBASE-14205 RegionCoprocessorHost System.nanoTime() performance bottleneck

This commit is contained in:
Andrew Purtell 2015-09-24 11:16:36 -07:00
parent 2239d55871
commit 20ed465d07
7 changed files with 1 addition and 172 deletions

View File

@@ -18,10 +18,6 @@
package org.apache.hadoop.hbase.regionserver;
import java.util.Map;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
/**
* Interface of class that will wrap an HRegion and export numbers so they can be
* used in MetricsRegionSource
@@ -86,11 +82,6 @@ public interface MetricsRegionWrapper {
int getRegionHashCode();
/**
* Get the time spent by coprocessors in this region.
*/
Map<String, DescriptiveStatistics> getCoprocessorExecutionStatistics();
/**
* Get the replica id of this region.
*/

View File

@@ -18,12 +18,10 @@
package org.apache.hadoop.hbase.regionserver;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
@@ -245,36 +243,6 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID,
MetricsRegionSource.REPLICA_ID_DESC),
this.regionWrapper.getReplicaId());
for (Map.Entry<String, DescriptiveStatistics> entry : this.regionWrapper
.getCoprocessorExecutionStatistics()
.entrySet()) {
DescriptiveStatistics ds = entry.getValue();
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "Min: "),
ds.getMin() / 1000);
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "Mean: "),
ds.getMean() / 1000);
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "Max: "),
ds.getMax() / 1000);
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "90th percentile: "), ds
.getPercentile(90d) / 1000);
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "95th percentile: "), ds
.getPercentile(95d) / 1000);
mrb.addGauge(Interns.info(regionNamePrefix + " " + entry.getKey() + " "
+ MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS,
MetricsRegionSource.COPROCESSOR_EXECUTION_STATISTICS_DESC + "99th percentile: "), ds
.getPercentile(99d) / 1000);
}
}
}

View File

@@ -129,11 +129,6 @@ public class TestMetricsRegionSourceImpl {
return regionName.hashCode();
}
@Override
public Map<String, DescriptiveStatistics> getCoprocessorExecutionStatistics() {
return null;
}
/**
* Always return 0 for testing
*/

View File

@@ -22,7 +22,6 @@
</%args>
<%import>
java.util.*;
org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
org.apache.hadoop.hbase.regionserver.HRegionServer;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
@@ -48,7 +47,6 @@
<li class=""><a href="#tab_regionStoreStats" data-toggle="tab">Storefile Metrics</a></li>
<li class=""><a href="#tab_regionMemstoreStats" data-toggle="tab">Memstore Metrics</a></li>
<li class=""><a href="#tab_regionCompactStats" data-toggle="tab">Compaction Metrics</a></li>
<li class=""><a href="#tab_coprocessorStats" data-toggle="tab">Coprocessor Metrics</a></li>
</ul>
<div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
<div class="tab-pane active" id="tab_regionBaseInfo">
@@ -66,9 +64,6 @@
<div class="tab-pane" id="tab_regionCompactStats">
<& compactStats; onlineRegions = onlineRegions; &>
</div>
<div class="tab-pane" id="tab_coprocessorStats">
<& coprocessorStats; onlineRegions = onlineRegions; &>
</div>
</div>
</div>
<p>Region names are made of the containing table's name, a comma,
@@ -239,66 +234,4 @@
</tr>
</%for>
</table>
</%def>
<%def coprocessorStats>
<%args>
List<HRegionInfo> onlineRegions;
</%args>
<table class="table table-striped">
<tr>
<th>Region Name</th>
<th>Coprocessor</th>
<th>Execution Time Statistics</th>
</tr>
<%for HRegionInfo r: onlineRegions %>
<%java>
Region region = regionServer.getFromOnlineRegions(r.getEncodedName());
MetricsRegionWrapper mWrap = region == null ? null: region.getMetrics().getRegionWrapper();
</%java>
<%if mWrap != null %>
<%for Map.Entry<String, DescriptiveStatistics> entry: mWrap.getCoprocessorExecutionStatistics().entrySet() %>
<tr>
<%java>
String coprocessorName = entry.getKey();
DescriptiveStatistics ds = entry.getValue();
</%java>
<td><% r.getRegionNameAsString() %></td>
<td><% coprocessorName %></td>
<td>
<table class="table-condensed">
<tr>
<td>Min Time </td>
<td><% String.format("%.3f%n", ds.getMin()/1000/1000) %>ms</td>
</tr>
<tr>
<td>Avg Time </td>
<td><% String.format("%.3f%n", ds.getMean()/1000/1000) %>ms</td>
</tr>
<tr>
<td>Max Time </td>
<td><% String.format("%.3f%n", ds.getMax()/1000/1000) %>ms</td>
</tr>
<tr>
<td>90th percentile </td>
<td><% String.format("%.3f%n", ds.getPercentile(90d)/1000/1000) %>ms</td>
</tr>
<tr>
<td>95th percentile </td>
<td><% String.format("%.3f%n", ds.getPercentile(95d)/1000/1000) %>ms</td>
</tr>
<tr>
<td>99th percentile </td>
<td><% String.format("%.3f%n", ds.getPercentile(99d)/1000/1000) %>ms</td>
</tr>
</table>
</td>
</tr>
</%for>
</%if>
</%for>
</table>
</%def>
</%def>

View File

@@ -20,13 +20,11 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -45,7 +43,6 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
private long numStoreFiles;
private long memstoreSize;
private long storeFileSize;
private Map<String, DescriptiveStatistics> coprocessorTimes;
private ScheduledFuture<?> regionMetricsUpdateTask;
@@ -55,7 +52,6 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
this.runnable = new HRegionMetricsWrapperRunnable();
this.regionMetricsUpdateTask = this.executor.scheduleWithFixedDelay(this.runnable, PERIOD,
PERIOD, TimeUnit.SECONDS);
this.coprocessorTimes = new HashMap<String, DescriptiveStatistics>();
}
@Override
@@ -159,8 +155,6 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
numStoreFiles = tempNumStoreFiles;
memstoreSize = tempMemstoreSize;
storeFileSize = tempStoreFileSize;
coprocessorTimes = region.getCoprocessorHost().getCoprocessorExecutionStatistics();
}
}
@@ -169,11 +163,6 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
regionMetricsUpdateTask.cancel(true);
}
@Override
public Map<String, DescriptiveStatistics> getCoprocessorExecutionStatistics() {
return coprocessorTimes;
}
/**
* Get the replica id of this region.
*/

View File

@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
@@ -41,7 +39,6 @@ import org.apache.commons.collections.map.ReferenceMap;
import org.apache.commons.lang.ClassUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -81,7 +78,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
import org.apache.hadoop.hbase.util.Pair;
@@ -110,9 +106,6 @@ public class RegionCoprocessorHost
private Region region;
private RegionServerServices rsServices;
ConcurrentMap<String, Object> sharedData;
private static final int LATENCY_BUFFER_SIZE = 100;
private final BoundedConcurrentLinkedQueue<Long> coprocessorTimeNanos =
new BoundedConcurrentLinkedQueue<Long>(LATENCY_BUFFER_SIZE);
private final boolean useLegacyPre;
private final boolean useLegacyPost;
@@ -159,16 +152,6 @@ public class RegionCoprocessorHost
return sharedData;
}
public void offerExecutionLatency(long latencyNanos) {
coprocessorTimeNanos.offer(latencyNanos);
}
public Collection<Long> getExecutionLatenciesNanos() {
final List<Long> latencies = Lists.newArrayListWithCapacity(coprocessorTimeNanos.size());
coprocessorTimeNanos.drainTo(latencies);
return latencies;
}
@Override
public HRegionInfo getRegionInfo() {
return region.getRegionInfo();
@@ -1597,24 +1580,6 @@ public class RegionCoprocessorHost
});
}
public Map<String, DescriptiveStatistics> getCoprocessorExecutionStatistics() {
Map<String, DescriptiveStatistics> results = new HashMap<String, DescriptiveStatistics>();
for (RegionEnvironment env : coprocessors) {
DescriptiveStatistics ds = new DescriptiveStatistics();
if (env.getInstance() instanceof RegionObserver) {
for (Long time : env.getExecutionLatenciesNanos()) {
ds.addValue(time);
}
// Ensures that web ui circumvents the display of NaN values when there are zero samples.
if (ds.getN() == 0) {
ds.addValue(0);
}
results.put(env.getInstance().getClass().getSimpleName(), ds);
}
}
return results;
}
private static abstract class CoprocessorOperation
extends ObserverContext<RegionCoprocessorEnvironment> {
public abstract void call(Coprocessor observer,
@@ -1702,7 +1667,6 @@ public class RegionCoprocessorHost
for (RegionEnvironment env: coprocessors) {
Coprocessor observer = env.getInstance();
if (ctx.hasCall(observer)) {
long startTime = System.nanoTime();
ctx.prepare(env);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
@@ -1714,7 +1678,6 @@ public class RegionCoprocessorHost
} finally {
currentThread.setContextClassLoader(cl);
}
env.offerExecutionLatency(System.nanoTime() - startTime);
bypass |= ctx.shouldBypass();
if (earlyExit && ctx.shouldComplete()) {
break;

View File

@@ -18,11 +18,6 @@
package org.apache.hadoop.hbase.regionserver;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
int replicaid = 0;
@@ -105,11 +100,6 @@ public class MetricsRegionWrapperStub {
return 42;
}
@Override
public Map<String, DescriptiveStatistics> getCoprocessorExecutionStatistics() {
return new HashMap<String, DescriptiveStatistics>();
}
/**
* Get the replica id of this region.
*/