mirror of https://github.com/apache/lucene.git
SOLR-8785: Use Dropwizard Metrics library for core metrics
(cherry picked from commit ff6da66
)
This commit is contained in:
parent
c85a0d8aa3
commit
2deb900774
|
@ -13,9 +13,6 @@ com.carrotsearch.randomizedtesting.version = 2.4.0
|
|||
|
||||
/com.carrotsearch/hppc = 0.7.1
|
||||
|
||||
com.codahale.metrics.version = 3.0.1
|
||||
/com.codahale.metrics/metrics-core = ${com.codahale.metrics.version}
|
||||
/com.codahale.metrics/metrics-healthchecks = ${com.codahale.metrics.version}
|
||||
|
||||
/com.cybozu.labs/langdetect = 1.1-20120112
|
||||
/com.drewnoakes/metadata-extractor = 2.8.1
|
||||
|
@ -76,6 +73,10 @@ com.sun.jersey.version = 1.9
|
|||
/hsqldb/hsqldb = 1.8.0.10
|
||||
/io.airlift/slice = 0.10
|
||||
|
||||
io.dropwizard.metrics.version = 3.1.2
|
||||
/io.dropwizard.metrics/metrics-core = ${io.dropwizard.metrics.version}
|
||||
/io.dropwizard.metrics/metrics-healthchecks = ${io.dropwizard.metrics.version}
|
||||
|
||||
io.netty.netty-all.version = 4.0.36.Final
|
||||
/io.netty/netty-all = ${io.netty.netty-all.version}
|
||||
|
||||
|
|
|
@ -39,6 +39,13 @@ Upgrade Notes
|
|||
consequence of this change is that you must be aware that some tuples will not have values if
|
||||
there were none in the original document.
|
||||
|
||||
* SOLR-8785: Metrics related classes in org.apache.solr.util.stats have been removed in favor of
|
||||
the dropwizard metrics library. Any custom plugins using these classes should be changed to use
|
||||
the equivalent classes from the metrics library. As part of these changes, the "totalTime" metric
|
||||
exposed by Overseer Status API in previous versions has been removed because it is no longer supported
|
||||
by the metrics library. Also, the metrics "75thPctlRequestTime", "95thPctlRequestTime", "99thPctlRequestTime"
|
||||
and "999thPctlRequestTime" in Overseer Status API have been renamed to "75thPcRequestTime", "95thPcRequestTime"
|
||||
and so on for consistency with stats output in other parts of Solr.
|
||||
|
||||
New Features
|
||||
----------------------
|
||||
|
@ -142,6 +149,9 @@ Other Changes
|
|||
|
||||
* SOLR-9609: Change hard-coded keysize from 512 to 1024 (Jeremy Martini via Erick Erickson)
|
||||
|
||||
* SOLR-8785: Use Dropwizard Metrics library for core metrics. The copied over code in
|
||||
org.apache.solr.util.stats has been removed. (Jeff Wartes, Christine Poerschke, Kelvin Wong, shalin)
|
||||
|
||||
================== 6.3.0 ==================
|
||||
|
||||
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
|
||||
|
|
|
@ -18,11 +18,10 @@ package org.apache.solr.analytics.plugin;
|
|||
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.common.util.SimpleOrderedMap;
|
||||
import org.apache.solr.util.stats.Snapshot;
|
||||
import org.apache.solr.util.stats.Timer;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.solr.util.stats.TimerUtils;
|
||||
|
||||
public class AnalyticsStatisticsCollector {
|
||||
private final AtomicLong numRequests;
|
||||
|
@ -35,7 +34,7 @@ public class AnalyticsStatisticsCollector {
|
|||
private final AtomicLong numQueries;
|
||||
private final Timer requestTimes;
|
||||
|
||||
public TimerContext currentTimer;
|
||||
public Timer.Context currentTimer;
|
||||
|
||||
public AnalyticsStatisticsCollector() {
|
||||
numRequests = new AtomicLong();
|
||||
|
@ -88,7 +87,6 @@ public class AnalyticsStatisticsCollector {
|
|||
|
||||
public NamedList<Object> getStatistics() {
|
||||
NamedList<Object> lst = new SimpleOrderedMap<>();
|
||||
Snapshot snapshot = requestTimes.getSnapshot();
|
||||
lst.add("requests", numRequests.longValue());
|
||||
lst.add("analyticsRequests", numAnalyticsRequests.longValue());
|
||||
lst.add("statsRequests", numStatsRequests.longValue());
|
||||
|
@ -97,17 +95,7 @@ public class AnalyticsStatisticsCollector {
|
|||
lst.add("rangeFacets", numRangeFacets.longValue());
|
||||
lst.add("queryFacets", numQueryFacets.longValue());
|
||||
lst.add("queriesInQueryFacets", numQueries.longValue());
|
||||
lst.add("totalTime", requestTimes.getSum());
|
||||
lst.add("avgRequestsPerSecond", requestTimes.getMeanRate());
|
||||
lst.add("5minRateReqsPerSecond", requestTimes.getFiveMinuteRate());
|
||||
lst.add("15minRateReqsPerSecond", requestTimes.getFifteenMinuteRate());
|
||||
lst.add("avgTimePerRequest", requestTimes.getMean());
|
||||
lst.add("medianRequestTime", snapshot.getMedian());
|
||||
lst.add("75thPcRequestTime", snapshot.get75thPercentile());
|
||||
lst.add("95thPcRequestTime", snapshot.get95thPercentile());
|
||||
lst.add("99thPcRequestTime", snapshot.get99thPercentile());
|
||||
lst.add("999thPcRequestTime", snapshot.get999thPercentile());
|
||||
TimerUtils.addMetrics(lst, requestTimes);
|
||||
return lst;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -34,8 +34,8 @@
|
|||
|
||||
<dependency org="org.kitesdk" name="kite-morphlines-avro" rev="${/org.kitesdk/kite-morphlines-avro}" conf="compile" />
|
||||
|
||||
<dependency org="com.codahale.metrics" name="metrics-core" rev="${/com.codahale.metrics/metrics-core}" conf="compile" />
|
||||
<dependency org="com.codahale.metrics" name="metrics-healthchecks" rev="${/com.codahale.metrics/metrics-healthchecks}" conf="compile" />
|
||||
<dependency org="io.dropwizard.metrics" name="metrics-core" rev="${/io.dropwizard.metrics/metrics-core}" conf="compile" />
|
||||
<dependency org="io.dropwizard.metrics" name="metrics-healthchecks" rev="${/io.dropwizard.metrics/metrics-healthchecks}" conf="compile" />
|
||||
<dependency org="com.typesafe" name="config" rev="${/com.typesafe/config}" conf="compile" />
|
||||
|
||||
<!-- Test Dependencies -->
|
||||
|
|
|
@ -50,6 +50,7 @@
|
|||
<dependency org="log4j" name="log4j" rev="${/log4j/log4j}" conf="compile"/>
|
||||
<dependency org="org.slf4j" name="slf4j-log4j12" rev="${/org.slf4j/slf4j-log4j12}" conf="compile"/>
|
||||
<dependency org="org.slf4j" name="jcl-over-slf4j" rev="${/org.slf4j/jcl-over-slf4j}" conf="compile"/>
|
||||
<dependency org="io.dropwizard.metrics" name="metrics-core" rev="${/io.dropwizard.metrics/metrics-core}" conf="compile" />
|
||||
|
||||
<dependency org="org.easymock" name="easymock" rev="${/org.easymock/easymock}" conf="test"/>
|
||||
<dependency org="cglib" name="cglib-nodep" rev="${/cglib/cglib-nodep}" conf="test"/>
|
||||
|
|
|
@ -27,6 +27,7 @@ import java.util.concurrent.locks.Condition;
|
|||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.solr.common.SolrException;
|
||||
|
@ -34,7 +35,6 @@ import org.apache.solr.common.SolrException.ErrorCode;
|
|||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
import org.apache.solr.common.cloud.ZkCmdExecutor;
|
||||
import org.apache.solr.common.util.Pair;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.WatchedEvent;
|
||||
|
@ -119,7 +119,7 @@ public class DistributedQueue {
|
|||
* @return data at the first element of the queue, or null.
|
||||
*/
|
||||
public byte[] peek() throws KeeperException, InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_peek");
|
||||
Timer.Context time = stats.time(dir + "_peek");
|
||||
try {
|
||||
return firstElement();
|
||||
} finally {
|
||||
|
@ -147,7 +147,7 @@ public class DistributedQueue {
|
|||
*/
|
||||
public byte[] peek(long wait) throws KeeperException, InterruptedException {
|
||||
Preconditions.checkArgument(wait > 0);
|
||||
TimerContext time;
|
||||
Timer.Context time;
|
||||
if (wait == Long.MAX_VALUE) {
|
||||
time = stats.time(dir + "_peek_wait_forever");
|
||||
} else {
|
||||
|
@ -177,7 +177,7 @@ public class DistributedQueue {
|
|||
* @return Head of the queue or null.
|
||||
*/
|
||||
public byte[] poll() throws KeeperException, InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_poll");
|
||||
Timer.Context time = stats.time(dir + "_poll");
|
||||
try {
|
||||
return removeFirst();
|
||||
} finally {
|
||||
|
@ -191,7 +191,7 @@ public class DistributedQueue {
|
|||
* @return The former head of the queue
|
||||
*/
|
||||
public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_remove");
|
||||
Timer.Context time = stats.time(dir + "_remove");
|
||||
try {
|
||||
byte[] result = removeFirst();
|
||||
if (result == null) {
|
||||
|
@ -210,7 +210,7 @@ public class DistributedQueue {
|
|||
*/
|
||||
public byte[] take() throws KeeperException, InterruptedException {
|
||||
// Same as for element. Should refactor this.
|
||||
TimerContext timer = stats.time(dir + "_take");
|
||||
Timer.Context timer = stats.time(dir + "_take");
|
||||
updateLock.lockInterruptibly();
|
||||
try {
|
||||
while (true) {
|
||||
|
@ -234,7 +234,7 @@ public class DistributedQueue {
|
|||
* element to become visible.
|
||||
*/
|
||||
public void offer(byte[] data) throws KeeperException, InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_offer");
|
||||
Timer.Context time = stats.time(dir + "_offer");
|
||||
try {
|
||||
while (true) {
|
||||
try {
|
||||
|
|
|
@ -26,9 +26,9 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.client.solrj.SolrResponse;
|
||||
import org.apache.solr.cloud.overseer.ClusterStateMutator;
|
||||
import org.apache.solr.cloud.overseer.CollectionMutator;
|
||||
|
@ -49,9 +49,6 @@ import org.apache.solr.core.CloudConfig;
|
|||
import org.apache.solr.handler.admin.CollectionsHandler;
|
||||
import org.apache.solr.handler.component.ShardHandler;
|
||||
import org.apache.solr.update.UpdateShardHandler;
|
||||
import org.apache.solr.util.stats.Clock;
|
||||
import org.apache.solr.util.stats.Timer;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -254,7 +251,7 @@ public class Overseer implements Closeable {
|
|||
private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
|
||||
final String operation = message.getStr(QUEUE_OPERATION);
|
||||
List<ZkWriteCommand> zkWriteCommands = null;
|
||||
final TimerContext timerContext = stats.time(operation);
|
||||
final Timer.Context timerContext = stats.time(operation);
|
||||
try {
|
||||
zkWriteCommands = processMessage(clusterState, message, operation);
|
||||
stats.success(operation);
|
||||
|
@ -392,7 +389,7 @@ public class Overseer implements Closeable {
|
|||
}
|
||||
|
||||
private LeaderStatus amILeader() {
|
||||
TimerContext timerContext = stats.time("am_i_leader");
|
||||
Timer.Context timerContext = stats.time("am_i_leader");
|
||||
boolean success = true;
|
||||
try {
|
||||
ZkNodeProps props = ZkNodeProps.load(zkClient.getData(
|
||||
|
@ -795,7 +792,7 @@ public class Overseer implements Closeable {
|
|||
stat.errors.incrementAndGet();
|
||||
}
|
||||
|
||||
public TimerContext time(String operation) {
|
||||
public Timer.Context time(String operation) {
|
||||
String op = operation.toLowerCase(Locale.ROOT);
|
||||
Stat stat = stats.get(op);
|
||||
if (stat == null) {
|
||||
|
@ -853,7 +850,7 @@ public class Overseer implements Closeable {
|
|||
public Stat() {
|
||||
this.success = new AtomicInteger();
|
||||
this.errors = new AtomicInteger();
|
||||
this.requestTime = new Timer(TimeUnit.MILLISECONDS, TimeUnit.MINUTES, Clock.defaultClock());
|
||||
this.requestTime = new Timer();
|
||||
this.failureDetails = new LinkedList<>();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,14 +23,14 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.common.util.SimpleOrderedMap;
|
||||
import org.apache.solr.util.stats.Snapshot;
|
||||
import org.apache.solr.util.stats.Timer;
|
||||
import org.apache.solr.util.stats.TimerUtils;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -100,17 +100,7 @@ public class OverseerStatusCmd implements Cmd {
|
|||
lst.add("errors", errors);
|
||||
}
|
||||
Timer timer = entry.getValue().requestTime;
|
||||
Snapshot snapshot = timer.getSnapshot();
|
||||
lst.add("totalTime", timer.getSum());
|
||||
lst.add("avgRequestsPerMinute", timer.getMeanRate());
|
||||
lst.add("5minRateRequestsPerMinute", timer.getFiveMinuteRate());
|
||||
lst.add("15minRateRequestsPerMinute", timer.getFifteenMinuteRate());
|
||||
lst.add("avgTimePerRequest", timer.getMean());
|
||||
lst.add("medianRequestTime", snapshot.getMedian());
|
||||
lst.add("75thPctlRequestTime", snapshot.get75thPercentile());
|
||||
lst.add("95thPctlRequestTime", snapshot.get95thPercentile());
|
||||
lst.add("99thPctlRequestTime", snapshot.get99thPercentile());
|
||||
lst.add("999thPctlRequestTime", snapshot.get999thPercentile());
|
||||
TimerUtils.addMetrics(lst, timer);
|
||||
}
|
||||
results.add("overseer_operations", overseerStats);
|
||||
results.add("collection_operations", collectionStats);
|
||||
|
|
|
@ -30,6 +30,7 @@ import java.util.concurrent.SynchronousQueue;
|
|||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.solr.client.solrj.SolrResponse;
|
||||
|
@ -43,7 +44,6 @@ import org.apache.solr.common.util.ExecutorUtil;
|
|||
import org.apache.solr.common.util.StrUtils;
|
||||
import org.apache.solr.common.util.Utils;
|
||||
import org.apache.solr.util.DefaultSolrThreadFactory;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -380,7 +380,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
|
|||
|
||||
protected LeaderStatus amILeader() {
|
||||
String statsName = "collection_am_i_leader";
|
||||
TimerContext timerContext = stats.time(statsName);
|
||||
Timer.Context timerContext = stats.time(statsName);
|
||||
boolean success = true;
|
||||
try {
|
||||
ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(
|
||||
|
@ -451,7 +451,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
|
|||
|
||||
public void run() {
|
||||
String statsName = messageHandler.getTimerName(operation);
|
||||
final TimerContext timerContext = stats.time(statsName);
|
||||
final Timer.Context timerContext = stats.time(statsName);
|
||||
|
||||
boolean success = false;
|
||||
final String asyncId = message.getStr(ASYNC);
|
||||
|
|
|
@ -22,10 +22,10 @@ import java.util.List;
|
|||
import java.util.TreeSet;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.util.Pair;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.WatchedEvent;
|
||||
|
@ -85,7 +85,7 @@ public class OverseerTaskQueue extends DistributedQueue {
|
|||
*/
|
||||
public void remove(QueueEvent event) throws KeeperException,
|
||||
InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_remove_event");
|
||||
Timer.Context time = stats.time(dir + "_remove_event");
|
||||
try {
|
||||
String path = event.getId();
|
||||
String responsePath = dir + "/" + response_prefix
|
||||
|
@ -181,7 +181,7 @@ public class OverseerTaskQueue extends DistributedQueue {
|
|||
*/
|
||||
public QueueEvent offer(byte[] data, long timeout) throws KeeperException,
|
||||
InterruptedException {
|
||||
TimerContext time = stats.time(dir + "_offer");
|
||||
Timer.Context time = stats.time(dir + "_offer");
|
||||
try {
|
||||
// Create and watch the response node before creating the request node;
|
||||
// otherwise we may miss the response.
|
||||
|
@ -227,7 +227,7 @@ public class OverseerTaskQueue extends DistributedQueue {
|
|||
ArrayList<QueueEvent> topN = new ArrayList<>();
|
||||
|
||||
LOG.debug("Peeking for top {} elements. ExcludeSet: {}", n, excludeSet);
|
||||
TimerContext time;
|
||||
Timer.Context time;
|
||||
if (waitMillis == Long.MAX_VALUE) time = stats.time(dir + "_peekTopN_wait_forever");
|
||||
else time = stats.time(dir + "_peekTopN_wait" + waitMillis);
|
||||
|
||||
|
|
|
@ -21,12 +21,12 @@ import java.util.HashMap;
|
|||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.cloud.Overseer;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.util.Utils;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
|
@ -210,7 +210,7 @@ public class ZkStateWriter {
|
|||
throw new IllegalStateException("ZkStateWriter has seen a tragic error, this instance can no longer be used");
|
||||
}
|
||||
if (!hasPendingUpdates()) return clusterState;
|
||||
TimerContext timerContext = stats.time("update_state");
|
||||
Timer.Context timerContext = stats.time("update_state");
|
||||
boolean success = false;
|
||||
try {
|
||||
if (!updates.isEmpty()) {
|
||||
|
|
|
@ -20,6 +20,7 @@ import java.lang.invoke.MethodHandles;
|
|||
import java.net.URL;
|
||||
import java.util.concurrent.atomic.LongAdder;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
|
@ -33,9 +34,7 @@ import org.apache.solr.request.SolrRequestHandler;
|
|||
import org.apache.solr.response.SolrQueryResponse;
|
||||
import org.apache.solr.search.SyntaxError;
|
||||
import org.apache.solr.util.SolrPluginUtils;
|
||||
import org.apache.solr.util.stats.Snapshot;
|
||||
import org.apache.solr.util.stats.Timer;
|
||||
import org.apache.solr.util.stats.TimerContext;
|
||||
import org.apache.solr.util.stats.TimerUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -144,7 +143,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
|
|||
@Override
|
||||
public void handleRequest(SolrQueryRequest req, SolrQueryResponse rsp) {
|
||||
numRequests.increment();
|
||||
TimerContext timer = requestTimes.time();
|
||||
Timer.Context timer = requestTimes.time();
|
||||
try {
|
||||
if(pluginInfo != null && pluginInfo.attributes.containsKey(USEPARAM)) req.getContext().put(USEPARAM,pluginInfo.attributes.get(USEPARAM));
|
||||
SolrPluginUtils.setDefaults(this, req, defaults, appends, invariants);
|
||||
|
@ -268,26 +267,16 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
|
|||
@Override
|
||||
public NamedList<Object> getStatistics() {
|
||||
NamedList<Object> lst = new SimpleOrderedMap<>();
|
||||
Snapshot snapshot = requestTimes.getSnapshot();
|
||||
lst.add("handlerStart",handlerStart);
|
||||
lst.add("requests", numRequests.longValue());
|
||||
lst.add("errors", numServerErrors.longValue() + numClientErrors.longValue());
|
||||
lst.add("serverErrors", numServerErrors.longValue());
|
||||
lst.add("clientErrors", numClientErrors.longValue());
|
||||
lst.add("timeouts", numTimeouts.longValue());
|
||||
lst.add("totalTime", requestTimes.getSum());
|
||||
lst.add("avgRequestsPerSecond", requestTimes.getMeanRate());
|
||||
lst.add("5minRateReqsPerSecond", requestTimes.getFiveMinuteRate());
|
||||
lst.add("15minRateReqsPerSecond", requestTimes.getFifteenMinuteRate());
|
||||
lst.add("avgTimePerRequest", requestTimes.getMean());
|
||||
lst.add("medianRequestTime", snapshot.getMedian());
|
||||
lst.add("75thPcRequestTime", snapshot.get75thPercentile());
|
||||
lst.add("95thPcRequestTime", snapshot.get95thPercentile());
|
||||
lst.add("99thPcRequestTime", snapshot.get99thPercentile());
|
||||
lst.add("999thPcRequestTime", snapshot.get999thPercentile());
|
||||
TimerUtils.addMetrics(lst, requestTimes);
|
||||
return lst;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1,84 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
|
||||
import org.apache.solr.common.util.SuppressForbidden;
|
||||
|
||||
/**
|
||||
* An abstraction for how time passes. It is passed to {@link Timer} to track timing.
|
||||
*/
|
||||
public abstract class Clock {
|
||||
/**
|
||||
* Returns the current time tick.
|
||||
*
|
||||
* @return time tick in nanoseconds
|
||||
*/
|
||||
public abstract long getTick();
|
||||
|
||||
/**
|
||||
* Returns the current time in milliseconds.
|
||||
*
|
||||
* @return time in milliseconds
|
||||
*/
|
||||
@SuppressForbidden(reason = "Need currentTimeMillis, API used by ExponentiallyDecayingSample for suspect reasons")
|
||||
public long getTime() {
|
||||
return System.currentTimeMillis();
|
||||
}
|
||||
|
||||
private static final Clock DEFAULT = new UserTimeClock();
|
||||
|
||||
/**
|
||||
* The default clock to use.
|
||||
*
|
||||
* @return the default {@link Clock} instance
|
||||
*
|
||||
* @see UserTimeClock
|
||||
*/
|
||||
public static Clock defaultClock() {
|
||||
return DEFAULT;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* A clock implementation which returns the current time in epoch nanoseconds.
|
||||
*/
|
||||
public static class UserTimeClock extends Clock {
|
||||
@Override
|
||||
public long getTick() {
|
||||
return System.nanoTime();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A clock implementation which returns the current thread's CPU time.
|
||||
*/
|
||||
public static class CpuTimeClock extends Clock {
|
||||
private static final ThreadMXBean THREAD_MX_BEAN = ManagementFactory.getThreadMXBean();
|
||||
|
||||
@Override
|
||||
public long getTick() {
|
||||
return THREAD_MX_BEAN.getCurrentThreadCpuTime();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import static java.lang.Math.exp;
|
||||
|
||||
/**
|
||||
* An exponentially-weighted moving average.
|
||||
*
|
||||
* @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg1.pdf">UNIX Load Average Part 1: How
|
||||
* It Works</a>
|
||||
* @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg2.pdf">UNIX Load Average Part 2: Not
|
||||
* Your Average Average</a>
|
||||
*/
|
||||
public class EWMA {
|
||||
private static final int INTERVAL = 5;
|
||||
private static final double SECONDS_PER_MINUTE = 60.0;
|
||||
private static final int ONE_MINUTE = 1;
|
||||
private static final int FIVE_MINUTES = 5;
|
||||
private static final int FIFTEEN_MINUTES = 15;
|
||||
private static final double M1_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / ONE_MINUTE);
|
||||
private static final double M5_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / FIVE_MINUTES);
|
||||
private static final double M15_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / FIFTEEN_MINUTES);
|
||||
|
||||
private volatile boolean initialized = false;
|
||||
private volatile double rate = 0.0;
|
||||
|
||||
private final AtomicLong uncounted = new AtomicLong();
|
||||
private final double alpha, interval;
|
||||
|
||||
/**
|
||||
* Creates a new EWMA which is equivalent to the UNIX one minute load average and which expects
|
||||
* to be ticked every 5 seconds.
|
||||
*
|
||||
* @return a one-minute EWMA
|
||||
*/
|
||||
public static EWMA oneMinuteEWMA() {
|
||||
return new EWMA(M1_ALPHA, INTERVAL, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new EWMA which is equivalent to the UNIX five minute load average and which expects
|
||||
* to be ticked every 5 seconds.
|
||||
*
|
||||
* @return a five-minute EWMA
|
||||
*/
|
||||
public static EWMA fiveMinuteEWMA() {
|
||||
return new EWMA(M5_ALPHA, INTERVAL, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new EWMA which is equivalent to the UNIX fifteen minute load average and which
|
||||
* expects to be ticked every 5 seconds.
|
||||
*
|
||||
* @return a fifteen-minute EWMA
|
||||
*/
|
||||
public static EWMA fifteenMinuteEWMA() {
|
||||
return new EWMA(M15_ALPHA, INTERVAL, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new EWMA with a specific smoothing constant.
|
||||
*
|
||||
* @param alpha the smoothing constant
|
||||
* @param interval the expected tick interval
|
||||
* @param intervalUnit the time unit of the tick interval
|
||||
*/
|
||||
public EWMA(double alpha, long interval, TimeUnit intervalUnit) {
|
||||
this.interval = intervalUnit.toNanos(interval);
|
||||
this.alpha = alpha;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the moving average with a new value.
|
||||
*
|
||||
* @param n the new value
|
||||
*/
|
||||
public void update(long n) {
|
||||
uncounted.addAndGet(n);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the passage of time and decay the current rate accordingly.
|
||||
*/
|
||||
public void tick() {
|
||||
final long count = uncounted.getAndSet(0);
|
||||
final double instantRate = count / interval;
|
||||
if (initialized) {
|
||||
rate += (alpha * (instantRate - rate));
|
||||
} else {
|
||||
rate = instantRate;
|
||||
initialized = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the rate in the given units of time.
|
||||
*
|
||||
* @param rateUnit the unit of time
|
||||
* @return the rate
|
||||
*/
|
||||
public double getRate(TimeUnit rateUnit) {
|
||||
return rate * (double) rateUnit.toNanos(1);
|
||||
}
|
||||
}
|
|
@ -1,218 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ConcurrentSkipListMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import static java.lang.Math.exp;
|
||||
import static java.lang.Math.min;
|
||||
|
||||
/**
|
||||
* An exponentially-decaying random sample of {@code long}s. Uses Cormode et al's forward-decaying
|
||||
* priority reservoir sampling method to produce a statistically representative sample,
|
||||
* exponentially biased towards newer entries.
|
||||
*
|
||||
* See <a href="http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf">
|
||||
* Cormode et al. Forward Decay: A Practical Time Decay Model for Streaming Systems. ICDE '09: Proceedings of the 2009 IEEE International Conference on Data Engineering (2009)</a>
|
||||
*/
|
||||
public class ExponentiallyDecayingSample implements Sample {
|
||||
|
||||
private static final long RESCALE_THRESHOLD = TimeUnit.HOURS.toNanos(1);
|
||||
private final ConcurrentSkipListMap<Double, Long> values;
|
||||
private final ReentrantReadWriteLock lock;
|
||||
private final double alpha;
|
||||
private final int reservoirSize;
|
||||
private final AtomicLong count = new AtomicLong(0);
|
||||
private volatile long startTime;
|
||||
private final AtomicLong nextScaleTime = new AtomicLong(0);
|
||||
private final Clock clock;
|
||||
// TODO: Maybe replace this with a Mersenne Twister?
|
||||
private final Random random = new Random();
|
||||
|
||||
/**
|
||||
* Creates a new {@link ExponentiallyDecayingSample}.
|
||||
*
|
||||
* @param reservoirSize the number of samples to keep in the sampling reservoir
|
||||
* @param alpha the exponential decay factor; the higher this is, the more biased the
|
||||
* sample will be towards newer values
|
||||
*/
|
||||
public ExponentiallyDecayingSample(int reservoirSize, double alpha) {
|
||||
this(reservoirSize, alpha, Clock.defaultClock());
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link ExponentiallyDecayingSample}.
|
||||
*
|
||||
* @param reservoirSize the number of samples to keep in the sampling reservoir
|
||||
* @param alpha the exponential decay factor; the higher this is, the more biased the
|
||||
* sample will be towards newer values
|
||||
*/
|
||||
public ExponentiallyDecayingSample(int reservoirSize, double alpha, Clock clock) {
|
||||
this.values = new ConcurrentSkipListMap<>();
|
||||
this.lock = new ReentrantReadWriteLock();
|
||||
this.alpha = alpha;
|
||||
this.reservoirSize = reservoirSize;
|
||||
this.clock = clock;
|
||||
clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
lockForRescale();
|
||||
try {
|
||||
values.clear();
|
||||
count.set(0);
|
||||
this.startTime = currentTimeInSeconds();
|
||||
nextScaleTime.set(clock.getTick() + RESCALE_THRESHOLD);
|
||||
} finally {
|
||||
unlockForRescale();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return (int) min(reservoirSize, count.get());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void update(long value) {
|
||||
update(value, currentTimeInSeconds());
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds an old value with a fixed timestamp to the sample.
|
||||
*
|
||||
* @param value the value to be added
|
||||
* @param timestamp the epoch timestamp of {@code value} in seconds
|
||||
*/
|
||||
public void update(long value, long timestamp) {
|
||||
|
||||
rescaleIfNeeded();
|
||||
|
||||
lockForRegularUsage();
|
||||
try {
|
||||
final double priority = weight(timestamp - startTime) / random.nextDouble();
|
||||
final long newCount = count.incrementAndGet();
|
||||
if (newCount <= reservoirSize) {
|
||||
values.put(priority, value);
|
||||
} else {
|
||||
Double first = values.firstKey();
|
||||
if (first < priority) {
|
||||
if (values.putIfAbsent(priority, value) == null) {
|
||||
// ensure we always remove an item
|
||||
while (values.remove(first) == null) {
|
||||
first = values.firstKey();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
unlockForRegularUsage();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
private void rescaleIfNeeded() {
|
||||
final long now = clock.getTick();
|
||||
final long next = nextScaleTime.get();
|
||||
if (now >= next) {
|
||||
rescale(now, next);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Snapshot getSnapshot() {
|
||||
lockForRegularUsage();
|
||||
try {
|
||||
return new Snapshot(values.values());
|
||||
} finally {
|
||||
unlockForRegularUsage();
|
||||
}
|
||||
}
|
||||
|
||||
private long currentTimeInSeconds() {
|
||||
return TimeUnit.MILLISECONDS.toSeconds(clock.getTime());
|
||||
}
|
||||
|
||||
private double weight(long t) {
|
||||
return exp(alpha * t);
|
||||
}
|
||||
|
||||
/* "A common feature of the above techniques—indeed, the key technique that
|
||||
* allows us to track the decayed weights efficiently—is that they maintain
|
||||
* counts and other quantities based on g(ti − L), and only scale by g(t − L)
|
||||
* at query time. But while g(ti −L)/g(t−L) is guaranteed to lie between zero
|
||||
* and one, the intermediate values of g(ti − L) could become very large. For
|
||||
* polynomial functions, these values should not grow too large, and should be
|
||||
* effectively represented in practice by floating point values without loss of
|
||||
* precision. For exponential functions, these values could grow quite large as
|
||||
* new values of (ti − L) become large, and potentially exceed the capacity of
|
||||
* common floating point types. However, since the values stored by the
|
||||
* algorithms are linear combinations of g values (scaled sums), they can be
|
||||
* rescaled relative to a new landmark. That is, by the analysis of exponential
|
||||
* decay in Section III-A, the choice of L does not affect the final result. We
|
||||
* can therefore multiply each value based on L by a factor of exp(−α(L′ − L)),
|
||||
* and obtain the correct value as if we had instead computed relative to a new
|
||||
* landmark L′ (and then use this new L′ at query time). This can be done with
|
||||
* a linear pass over whatever data structure is being used."
|
||||
*/
|
||||
private void rescale(long now, long next) {
|
||||
if (nextScaleTime.compareAndSet(next, now + RESCALE_THRESHOLD)) {
|
||||
lockForRescale();
|
||||
try {
|
||||
final long oldStartTime = startTime;
|
||||
this.startTime = currentTimeInSeconds();
|
||||
final ArrayList<Double> keys = new ArrayList<>(values.keySet());
|
||||
for (Double key : keys) {
|
||||
final Long value = values.remove(key);
|
||||
values.put(key * exp(-alpha * (startTime - oldStartTime)), value);
|
||||
}
|
||||
|
||||
// make sure the counter is in sync with the number of stored samples.
|
||||
count.set(values.size());
|
||||
} finally {
|
||||
unlockForRescale();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void unlockForRescale() {
|
||||
lock.writeLock().unlock();
|
||||
}
|
||||
|
||||
private void lockForRescale() {
|
||||
lock.writeLock().lock();
|
||||
}
|
||||
|
||||
private void lockForRegularUsage() {
|
||||
lock.readLock().lock();
|
||||
}
|
||||
|
||||
private void unlockForRegularUsage() {
|
||||
lock.readLock().unlock();
|
||||
}
|
||||
}
|
|
@ -1,238 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static java.lang.Math.sqrt;
|
||||
|
||||
/**
|
||||
* A metric which calculates the distribution of a value.
|
||||
*
|
||||
* @see <a href="http://www.johndcook.com/standard_deviation.html">Accurately computing running
|
||||
* variance</a>
|
||||
*/
|
||||
public class Histogram {
|
||||
|
||||
private static final int DEFAULT_SAMPLE_SIZE = 1028;
|
||||
private static final double DEFAULT_ALPHA = 0.015;
|
||||
|
||||
/**
|
||||
* The type of sampling the histogram should be performing.
|
||||
*/
|
||||
enum SampleType {
|
||||
/**
|
||||
* Uses a uniform sample of 1028 elements, which offers a 99.9% confidence level with a 5%
|
||||
* margin of error assuming a normal distribution.
|
||||
*/
|
||||
UNIFORM {
|
||||
@Override
|
||||
public Sample newSample() {
|
||||
return new UniformSample(DEFAULT_SAMPLE_SIZE);
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Uses an exponentially decaying sample of 1028 elements, which offers a 99.9% confidence
|
||||
* level with a 5% margin of error assuming a normal distribution, and an alpha factor of
|
||||
* 0.015, which heavily biases the sample to the past 5 minutes of measurements.
|
||||
*/
|
||||
BIASED {
|
||||
@Override
|
||||
public Sample newSample() {
|
||||
return new ExponentiallyDecayingSample(DEFAULT_SAMPLE_SIZE, DEFAULT_ALPHA);
|
||||
}
|
||||
};
|
||||
|
||||
public abstract Sample newSample();
|
||||
}
|
||||
|
||||
private final Sample sample;
|
||||
private final AtomicLong min = new AtomicLong();
|
||||
private final AtomicLong max = new AtomicLong();
|
||||
private final AtomicLong sum = new AtomicLong();
|
||||
// These are for the Welford algorithm for calculating running variance
|
||||
// without floating-point doom.
|
||||
private final AtomicReference<double[]> variance =
|
||||
new AtomicReference<>(new double[]{-1, 0}); // M, S
|
||||
private final AtomicLong count = new AtomicLong();
|
||||
|
||||
/**
|
||||
* Creates a new {@link Histogram} with the given sample type.
|
||||
*
|
||||
* @param type the type of sample to use
|
||||
*/
|
||||
Histogram(SampleType type) {
|
||||
this(type.newSample());
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link Histogram} with the given sample.
|
||||
*
|
||||
* @param sample the sample to create a histogram from
|
||||
*/
|
||||
Histogram(Sample sample) {
|
||||
this.sample = sample;
|
||||
clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears all recorded values.
|
||||
*/
|
||||
public void clear() {
|
||||
sample.clear();
|
||||
count.set(0);
|
||||
max.set(Long.MIN_VALUE);
|
||||
min.set(Long.MAX_VALUE);
|
||||
sum.set(0);
|
||||
variance.set(new double[]{ -1, 0 });
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a recorded value.
|
||||
*
|
||||
* @param value the length of the value
|
||||
*/
|
||||
public void update(int value) {
|
||||
update((long) value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a recorded value.
|
||||
*
|
||||
* @param value the length of the value
|
||||
*/
|
||||
public void update(long value) {
|
||||
count.incrementAndGet();
|
||||
sample.update(value);
|
||||
setMax(value);
|
||||
setMin(value);
|
||||
sum.getAndAdd(value);
|
||||
updateVariance(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of values recorded.
|
||||
*
|
||||
* @return the number of values recorded
|
||||
*/
|
||||
public long getCount() {
|
||||
return count.get();
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.yammer.metrics.core.Summarizable#max()
|
||||
*/
|
||||
public double getMax() {
|
||||
if (getCount() > 0) {
|
||||
return max.get();
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.yammer.metrics.core.Summarizable#min()
|
||||
*/
|
||||
public double getMin() {
|
||||
if (getCount() > 0) {
|
||||
return min.get();
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.yammer.metrics.core.Summarizable#mean()
|
||||
*/
|
||||
public double getMean() {
|
||||
if (getCount() > 0) {
|
||||
return sum.get() / (double) getCount();
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.yammer.metrics.core.Summarizable#stdDev()
|
||||
*/
|
||||
public double getStdDev() {
|
||||
if (getCount() > 0) {
|
||||
return sqrt(getVariance());
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.yammer.metrics.core.Summarizable#sum()
|
||||
*/
|
||||
public double getSum() {
|
||||
return (double) sum.get();
|
||||
}
|
||||
|
||||
public Snapshot getSnapshot() {
|
||||
return sample.getSnapshot();
|
||||
}
|
||||
|
||||
private double getVariance() {
|
||||
if (getCount() <= 1) {
|
||||
return 0.0;
|
||||
}
|
||||
return variance.get()[1] / (getCount() - 1);
|
||||
}
|
||||
|
||||
private void setMax(long potentialMax) {
|
||||
boolean done = false;
|
||||
while (!done) {
|
||||
final long currentMax = max.get();
|
||||
done = currentMax >= potentialMax || max.compareAndSet(currentMax, potentialMax);
|
||||
}
|
||||
}
|
||||
|
||||
private void setMin(long potentialMin) {
|
||||
boolean done = false;
|
||||
while (!done) {
|
||||
final long currentMin = min.get();
|
||||
done = currentMin <= potentialMin || min.compareAndSet(currentMin, potentialMin);
|
||||
}
|
||||
}
|
||||
|
||||
private void updateVariance(long value) {
|
||||
while (true) {
|
||||
final double[] oldValues = variance.get();
|
||||
final double[] newValues = new double[2];
|
||||
if (oldValues[0] == -1) {
|
||||
newValues[0] = value;
|
||||
newValues[1] = 0;
|
||||
} else {
|
||||
final double oldM = oldValues[0];
|
||||
final double oldS = oldValues[1];
|
||||
|
||||
final double newM = oldM + ((value - oldM) / getCount());
|
||||
final double newS = oldS + ((value - oldM) * (value - newM));
|
||||
|
||||
newValues[0] = newM;
|
||||
newValues[1] = newS;
|
||||
}
|
||||
if (variance.compareAndSet(oldValues, newValues)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
/**
|
||||
* A meter metric which measures mean throughput and one-, five-, and fifteen-minute
|
||||
* exponentially-weighted moving average throughputs.
|
||||
*
|
||||
* @see <a href="http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average">EMA</a>
|
||||
*/
|
||||
public class Meter {
|
||||
|
||||
private static final long TICK_INTERVAL = TimeUnit.SECONDS.toNanos(5);
|
||||
|
||||
private final EWMA m1Rate = EWMA.oneMinuteEWMA();
|
||||
private final EWMA m5Rate = EWMA.fiveMinuteEWMA();
|
||||
private final EWMA m15Rate = EWMA.fifteenMinuteEWMA();
|
||||
|
||||
private final AtomicLong count = new AtomicLong();
|
||||
private final long startTime;
|
||||
private final AtomicLong lastTick;
|
||||
private final TimeUnit rateUnit;
|
||||
private final String eventType;
|
||||
private final Clock clock;
|
||||
|
||||
/**
|
||||
* Creates a new {@link Meter}.
|
||||
*
|
||||
* @param eventType the plural name of the event the meter is measuring (e.g., {@code
|
||||
* "requests"})
|
||||
* @param rateUnit the rate unit of the new meter
|
||||
* @param clock the clock to use for the meter ticks
|
||||
*/
|
||||
Meter(String eventType, TimeUnit rateUnit, Clock clock) {
|
||||
this.rateUnit = rateUnit;
|
||||
this.eventType = eventType;
|
||||
this.clock = clock;
|
||||
this.startTime = this.clock.getTick();
|
||||
this.lastTick = new AtomicLong(startTime);
|
||||
}
|
||||
|
||||
public TimeUnit getRateUnit() {
|
||||
return rateUnit;
|
||||
}
|
||||
|
||||
public String getEventType() {
|
||||
return eventType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the moving averages.
|
||||
*/
|
||||
void tick() {
|
||||
m1Rate.tick();
|
||||
m5Rate.tick();
|
||||
m15Rate.tick();
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the occurrence of an event.
|
||||
*/
|
||||
public void mark() {
|
||||
mark(1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the occurrence of a given number of events.
|
||||
*
|
||||
* @param n the number of events
|
||||
*/
|
||||
public void mark(long n) {
|
||||
tickIfNecessary();
|
||||
count.addAndGet(n);
|
||||
m1Rate.update(n);
|
||||
m5Rate.update(n);
|
||||
m15Rate.update(n);
|
||||
}
|
||||
|
||||
private void tickIfNecessary() {
|
||||
final long oldTick = lastTick.get();
|
||||
final long newTick = clock.getTick();
|
||||
final long age = newTick - oldTick;
|
||||
if (age > TICK_INTERVAL && lastTick.compareAndSet(oldTick, newTick)) {
|
||||
final long requiredTicks = age / TICK_INTERVAL;
|
||||
for (long i = 0; i < requiredTicks; i++) {
|
||||
tick();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public long getCount() {
|
||||
return count.get();
|
||||
}
|
||||
|
||||
public double getFifteenMinuteRate() {
|
||||
tickIfNecessary();
|
||||
return m15Rate.getRate(rateUnit);
|
||||
}
|
||||
|
||||
public double getFiveMinuteRate() {
|
||||
tickIfNecessary();
|
||||
return m5Rate.getRate(rateUnit);
|
||||
}
|
||||
|
||||
public double getMeanRate() {
|
||||
if (getCount() == 0) {
|
||||
return 0.0;
|
||||
} else {
|
||||
final long elapsed = (clock.getTick() - startTime);
|
||||
return convertNsRate(getCount() / (double) elapsed);
|
||||
}
|
||||
}
|
||||
|
||||
public double getOneMinuteRate() {
|
||||
tickIfNecessary();
|
||||
return m1Rate.getRate(rateUnit);
|
||||
}
|
||||
|
||||
private double convertNsRate(double ratePerNs) {
|
||||
return ratePerNs * (double) rateUnit.toNanos(1);
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
/**
|
||||
* A statistically representative sample of a data stream.
|
||||
*/
|
||||
public interface Sample {
|
||||
/**
|
||||
* Clears all recorded values.
|
||||
*/
|
||||
void clear();
|
||||
|
||||
/**
|
||||
* Returns the number of values recorded.
|
||||
*
|
||||
* @return the number of values recorded
|
||||
*/
|
||||
int size();
|
||||
|
||||
/**
|
||||
* Adds a new recorded value to the sample.
|
||||
*
|
||||
* @param value a new recorded value
|
||||
*/
|
||||
void update(long value);
|
||||
|
||||
/**
|
||||
* Returns a snapshot of the sample's values.
|
||||
*
|
||||
* @return a snapshot of the sample's values
|
||||
*/
|
||||
Snapshot getSnapshot();
|
||||
}
|
|
@ -1,168 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/*
|
||||
* Forked from https://github.com/codahale/metrics
|
||||
*/
|
||||
|
||||
package org.apache.solr.util.stats;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
||||
import static java.lang.Math.floor;
|
||||
|
||||
/**
|
||||
* A statistical snapshot of a {@link Snapshot}.
|
||||
*/
|
||||
public class Snapshot {
|
||||
private static final double MEDIAN_Q = 0.5;
|
||||
private static final double P75_Q = 0.75;
|
||||
private static final double P95_Q = 0.95;
|
||||
private static final double P98_Q = 0.98;
|
||||
private static final double P99_Q = 0.99;
|
||||
private static final double P999_Q = 0.999;
|
||||
|
||||
private final double[] values;
|
||||
|
||||
/**
|
||||
* Create a new {@link Snapshot} with the given values.
|
||||
*
|
||||
* @param values an unordered set of values in the sample
|
||||
*/
|
||||
public Snapshot(Collection<Long> values) {
|
||||
final Object[] copy = values.toArray();
|
||||
this.values = new double[copy.length];
|
||||
for (int i = 0; i < copy.length; i++) {
|
||||
this.values[i] = (Long) copy[i];
|
||||
}
|
||||
Arrays.sort(this.values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new {@link Snapshot} with the given values.
|
||||
*
|
||||
* @param values an unordered set of values in the sample
|
||||
*/
|
||||
public Snapshot(double[] values) {
|
||||
this.values = new double[values.length];
|
||||
System.arraycopy(values, 0, this.values, 0, values.length);
|
||||
Arrays.sort(this.values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the given quantile.
|
||||
*
|
||||
* @param quantile a given quantile, in {@code [0..1]}
|
||||
* @return the value in the distribution at {@code quantile}
|
||||
*/
|
||||
public double getValue(double quantile) {
|
||||
if (quantile < 0.0 || quantile > 1.0) {
|
||||
throw new IllegalArgumentException(quantile + " is not in [0..1]");
|
||||
}
|
||||
|
||||
if (values.length == 0) {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
final double pos = quantile * (values.length + 1);
|
||||
|
||||
if (pos < 1) {
|
||||
return values[0];
|
||||
}
|
||||
|
||||
if (pos >= values.length) {
|
||||
return values[values.length - 1];
|
||||
}
|
||||
|
||||
final double lower = values[(int) pos - 1];
|
||||
final double upper = values[(int) pos];
|
||||
return lower + (pos - floor(pos)) * (upper - lower);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of values in the snapshot.
|
||||
*
|
||||
* @return the number of values in the snapshot
|
||||
*/
|
||||
public int size() {
|
||||
return values.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the median value in the distribution.
|
||||
*
|
||||
* @return the median value in the distribution
|
||||
*/
|
||||
public double getMedian() {
|
||||
return getValue(MEDIAN_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the 75th percentile in the distribution.
|
||||
*
|
||||
* @return the value at the 75th percentile in the distribution
|
||||
*/
|
||||
public double get75thPercentile() {
|
||||
return getValue(P75_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the 95th percentile in the distribution.
|
||||
*
|
||||
* @return the value at the 95th percentile in the distribution
|
||||
*/
|
||||
public double get95thPercentile() {
|
||||
return getValue(P95_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the 98th percentile in the distribution.
|
||||
*
|
||||
* @return the value at the 98th percentile in the distribution
|
||||
*/
|
||||
public double get98thPercentile() {
|
||||
return getValue(P98_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the 99th percentile in the distribution.
|
||||
*
|
||||
* @return the value at the 99th percentile in the distribution
|
||||
*/
|
||||
public double get99thPercentile() {
|
||||
return getValue(P99_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value at the 99.9th percentile in the distribution.
|
||||
*
|
||||
* @return the value at the 99.9th percentile in the distribution
|
||||
*/
|
||||
public double get999thPercentile() {
|
||||
return getValue(P999_Q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the entire set of values in the snapshot.
|
||||
*
|
||||
* @return the entire set of values in the snapshot
|
||||
*/
|
||||
public double[] getValues() {
|
||||
return Arrays.copyOf(values, values.length);
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,203 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Forked from https://github.com/codahale/metrics
 */

package org.apache.solr.util.stats;

import org.apache.solr.util.stats.Histogram.SampleType;

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/**
 * A timer metric which aggregates timing durations and provides duration statistics, plus
 * throughput statistics via {@link Meter}.
 */
public class Timer {

  private final TimeUnit durationUnit, rateUnit;
  private final Meter meter;
  private final Histogram histogram = new Histogram(SampleType.BIASED);
  private final Clock clock;

  public Timer() {
    this(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, Clock.defaultClock());
  }

  /**
   * Creates a new {@link Timer}.
   *
   * @param durationUnit the scale unit for this timer's duration metrics
   * @param rateUnit the scale unit for this timer's rate metrics
   * @param clock the clock used to calculate duration
   */
  public Timer(TimeUnit durationUnit, TimeUnit rateUnit, Clock clock) {
    this.durationUnit = durationUnit;
    this.rateUnit = rateUnit;
    this.meter = new Meter("calls", rateUnit, clock);
    this.clock = clock;
    clear();
  }

  /**
   * Returns the timer's duration scale unit.
   *
   * @return the timer's duration scale unit
   */
  public TimeUnit getDurationUnit() {
    return durationUnit;
  }

  public TimeUnit getRateUnit() {
    return rateUnit;
  }

  /**
   * Clears all recorded durations.
   */
  public void clear() {
    histogram.clear();
  }

  /**
   * Adds a recorded duration.
   *
   * @param duration the length of the duration
   * @param unit the scale unit of {@code duration}
   */
  public void update(long duration, TimeUnit unit) {
    update(unit.toNanos(duration));
  }

  /**
   * Times and records the duration of event.
   *
   * @param event a {@link Callable} whose {@link Callable#call()} method implements a process
   *              whose duration should be timed
   * @param <T> the type of the value returned by {@code event}
   * @return the value returned by {@code event}
   * @throws Exception if {@code event} throws an {@link Exception}
   */
  public <T> T time(Callable<T> event) throws Exception {
    final long startTime = clock.getTick();
    try {
      return event.call();
    } finally {
      update(clock.getTick() - startTime);
    }
  }

  /**
   * Returns a timing {@link TimerContext}, which measures an elapsed time in nanoseconds.
   *
   * @return a new {@link TimerContext}
   */
  public TimerContext time() {
    return new TimerContext(this, clock);
  }

  public long getCount() {
    return histogram.getCount();
  }

  public double getFifteenMinuteRate() {
    return meter.getFifteenMinuteRate();
  }

  public double getFiveMinuteRate() {
    return meter.getFiveMinuteRate();
  }

  public double getMeanRate() {
    return meter.getMeanRate();
  }

  public double getOneMinuteRate() {
    return meter.getOneMinuteRate();
  }

  /**
   * Returns the longest recorded duration.
   *
   * @return the longest recorded duration
   */
  public double getMax() {
    return convertFromNS(histogram.getMax());
  }

  /**
   * Returns the shortest recorded duration.
   *
   * @return the shortest recorded duration
   */
  public double getMin() {
    return convertFromNS(histogram.getMin());
  }

  /**
   * Returns the arithmetic mean of all recorded durations.
   *
   * @return the arithmetic mean of all recorded durations
   */
  public double getMean() {
    return convertFromNS(histogram.getMean());
  }

  /**
   * Returns the standard deviation of all recorded durations.
   *
   * @return the standard deviation of all recorded durations
   */
  public double getStdDev() {
    return convertFromNS(histogram.getStdDev());
  }

  /**
   * Returns the sum of all recorded durations.
   *
   * @return the sum of all recorded durations
   */
  public double getSum() {
    return convertFromNS(histogram.getSum());
  }

  public Snapshot getSnapshot() {
    final double[] values = histogram.getSnapshot().getValues();
    final double[] converted = new double[values.length];
    for (int i = 0; i < values.length; i++) {
      converted[i] = convertFromNS(values[i]);
    }
    return new Snapshot(converted);
  }

  public String getEventType() {
    return meter.getEventType();
  }

  private void update(long duration) {
    if (duration >= 0) {
      histogram.update(duration);
      meter.mark();
    }
  }

  private double convertFromNS(double ns) {
    return ns / TimeUnit.NANOSECONDS.convert(1, durationUnit);
  }
}
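A small sketch of the Dropwizard com.codahale.metrics.Timer that replaces the fork removed above. The Timer methods shown are from metrics-core; the surrounding class and variable names are only illustrative:

import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;

public class DropwizardTimerSketch {
  public static void main(String[] args) throws Exception {
    Timer requestTimes = new Timer();

    // Record an explicit duration, as the Solr handlers do per request.
    requestTimes.update(125, TimeUnit.MILLISECONDS);

    // Or time a Callable directly; the duration is recorded even if it throws.
    String result = requestTimes.time(() -> "ok");

    // Durations are kept internally in nanoseconds; percentiles come from a Snapshot.
    Snapshot snapshot = requestTimes.getSnapshot();
    System.out.println(result + " count=" + requestTimes.getCount()
        + " p95(ns)=" + snapshot.get95thPercentile());
  }
}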
@@ -1,55 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Forked from https://github.com/codahale/metrics
 */

package org.apache.solr.util.stats;

import java.util.concurrent.TimeUnit;

/**
 * A timing context.
 *
 * @see Timer#time()
 */
public class TimerContext {
  private final Timer timer;
  private final Clock clock;
  private final long startTime;

  /**
   * Creates a new {@link TimerContext} with the current time as its starting value and with the
   * given {@link Timer}.
   *
   * @param timer the {@link Timer} to report the elapsed time to
   */
  TimerContext(Timer timer, Clock clock) {
    this.timer = timer;
    this.clock = clock;
    this.startTime = clock.getTick();
  }

  /**
   * Stops recording the elapsed time, updates the timer and returns the elapsed time
   */
  public long stop() {
    final long elapsedNanos = clock.getTick() - startTime;
    timer.update(elapsedNanos, TimeUnit.NANOSECONDS);
    return elapsedNanos;
  }
}
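Callers of the removed TimerContext switch to com.codahale.metrics.Timer.Context, which has the same stop() contract and also implements Closeable. A hedged sketch of the migration pattern (class, field and method names here are placeholders):

import com.codahale.metrics.Timer;

public class TimerContextMigrationSketch {
  private final Timer requestTimes = new Timer();

  void handleRequest(Runnable work) {
    // Old: TimerContext ctx = requestTimes.time(); ... ctx.stop();
    // New: Timer.Context behaves the same way, and try-with-resources also works
    // because Context implements java.io.Closeable (close() calls stop()).
    try (Timer.Context ignored = requestTimes.time()) {
      work.run();
    }
  }
}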
@@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.util.stats;

import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import org.apache.solr.common.util.NamedList;

/**
 * Solr specific {@link Timer} utility functions.
 */
public class TimerUtils {

  /**
   * Adds metrics from a Timer to a NamedList, using well-known names.
   * @param lst The NamedList to add the metrics data to
   * @param timer The Timer to extract the metrics from
   */
  public static void addMetrics(NamedList<Object> lst, Timer timer) {
    Snapshot snapshot = timer.getSnapshot();
    lst.add("avgRequestsPerMinute", timer.getMeanRate());
    lst.add("5minRateRequestsPerMinute", timer.getFiveMinuteRate());
    lst.add("15minRateRequestsPerMinute", timer.getFifteenMinuteRate());
    lst.add("avgTimePerRequest", nsToMs(snapshot.getMean()));
    lst.add("medianRequestTime", nsToMs(snapshot.getMedian()));
    lst.add("75thPcRequestTime", nsToMs(snapshot.get75thPercentile()));
    lst.add("95thPcRequestTime", nsToMs(snapshot.get95thPercentile()));
    lst.add("99thPcRequestTime", nsToMs(snapshot.get99thPercentile()));
    lst.add("999thPcRequestTime", nsToMs(snapshot.get999thPercentile()));
  }

  /**
   * Converts a double representing nanoseconds to a double representing milliseconds.
   *
   * @param ns the amount of time in nanoseconds
   * @return the amount of time in milliseconds
   */
  static double nsToMs(double ns) {
    return ns / TimeUnit.MILLISECONDS.toNanos(1);
  }

}
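TimerUtils.addMetrics and the Dropwizard Timer above are part of this patch; the snippet below is only an illustrative sketch of how a caller might populate a stats NamedList with them, roughly in the shape the handler statistics elsewhere in this commit take:

import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Timer;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.util.stats.TimerUtils;

public class StatsSketch {
  public static void main(String[] args) {
    Timer requestTimes = new Timer();
    requestTimes.update(12, TimeUnit.MILLISECONDS);
    requestTimes.update(48, TimeUnit.MILLISECONDS);

    // Fill a stats section with the well-known metric names.
    NamedList<Object> stats = new SimpleOrderedMap<>();
    TimerUtils.addMetrics(stats, requestTimes);
    System.out.println(stats); // avgTimePerRequest, 75thPcRequestTime, ...
  }
}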
@@ -1,108 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Forked from https://github.com/codahale/metrics
 */

package org.apache.solr.util.stats;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;

/**
 * A random sample of a stream of {@code long}s. Uses Vitter's Algorithm R to produce a
 * statistically representative sample.
 *
 * @see <a href="http://www.cs.umd.edu/~samir/498/vitter.pdf">Random Sampling with a Reservoir</a>
 */
public class UniformSample implements Sample {

  private static final int BITS_PER_LONG = 63;
  private final AtomicLong count = new AtomicLong();
  private final AtomicLongArray values;
  //TODO: Maybe replace with a Mersenne twister for better distribution
  private static final Random random = new Random();

  /**
   * Creates a new {@link UniformSample}.
   *
   * @param reservoirSize the number of samples to keep in the sampling reservoir
   */
  public UniformSample(int reservoirSize) {
    this.values = new AtomicLongArray(reservoirSize);
    clear();
  }

  @Override
  public void clear() {
    for (int i = 0; i < values.length(); i++) {
      values.set(i, 0);
    }
    count.set(0);
  }

  @Override
  public int size() {
    final long c = count.get();
    if (c > values.length()) {
      return values.length();
    }
    return (int) c;
  }

  @Override
  public void update(long value) {
    final long c = count.incrementAndGet();
    if (c <= values.length()) {
      values.set((int) c - 1, value);
    } else {
      final long r = nextLong(c);
      if (r < values.length()) {
        values.set((int) r, value);
      }
    }
  }

  /**
   * Get a pseudo-random long uniformly between 0 and n-1. Stolen from
   * {@link java.util.Random#nextInt()}.
   *
   * @param n the bound
   * @return a value select randomly from the range {@code [0..n)}.
   */
  private static long nextLong(long n) {
    long bits, val;
    do {
      bits = random.nextLong() & (~(1L << BITS_PER_LONG));
      val = bits % n;
    } while (bits - val + (n - 1) < 0L);
    return val;
  }

  @Override
  public Snapshot getSnapshot() {
    final int s = size();
    final List<Long> copy = new ArrayList<>(s);
    for (int i = 0; i < s; i++) {
      copy.add(values.get(i));
    }
    return new Snapshot(copy);
  }
}
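The class removed above is a reservoir sampler: the first k values fill the reservoir, and the i-th value afterwards replaces a random slot with probability k/i, so every element of the stream ends up in the sample with equal probability. A minimal single-threaded sketch of the same idea (illustrative only, without the atomic bookkeeping of the original):

import java.util.Arrays;
import java.util.Random;

public class ReservoirSketch {
  public static void main(String[] args) {
    final int k = 10;                      // reservoir size
    final long[] reservoir = new long[k];
    final Random random = new Random();

    long seen = 0;
    for (long value = 1; value <= 1_000; value++) {
      seen++;
      if (seen <= k) {
        reservoir[(int) (seen - 1)] = value;          // fill phase
      } else {
        long r = (long) (random.nextDouble() * seen); // uniform in [0, seen)
        if (r < k) {
          reservoir[(int) r] = value;                 // replace with probability k/seen
        }
      }
    }
    System.out.println(Arrays.toString(reservoir));
  }
}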
@@ -34,6 +34,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.cloud.overseer.OverseerAction;
@@ -52,9 +54,6 @@ import org.apache.solr.handler.component.HttpShardHandlerFactory;
 import org.apache.solr.update.UpdateShardHandler;
 import org.apache.solr.update.UpdateShardHandlerConfig;
 import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.stats.Snapshot;
-import org.apache.solr.util.stats.Timer;
-import org.apache.solr.util.stats.TimerContext;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
@@ -1027,7 +1026,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     q.offer(Utils.toJSON(m));
 
     Timer t = new Timer();
-    TimerContext context = t.time();
+    Timer.Context context = t.time();
     try {
       overseerClient = electNewOverseer(server.getZkAddress());
       assertTrue(overseers.size() > 0);
@@ -1072,16 +1071,19 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
   private void printTimingStats(Timer timer) {
     Snapshot snapshot = timer.getSnapshot();
-    log.info("\t totalTime: {}", timer.getSum());
     log.info("\t avgRequestsPerMinute: {}", timer.getMeanRate());
     log.info("\t 5minRateRequestsPerMinute: {}", timer.getFiveMinuteRate());
     log.info("\t 15minRateRequestsPerMinute: {}", timer.getFifteenMinuteRate());
-    log.info("\t avgTimePerRequest: {}", timer.getMean());
-    log.info("\t medianRequestTime: {}", snapshot.getMedian());
-    log.info("\t 75thPctlRequestTime: {}", snapshot.get75thPercentile());
-    log.info("\t 95thPctlRequestTime: {}", snapshot.get95thPercentile());
-    log.info("\t 99thPctlRequestTime: {}", snapshot.get99thPercentile());
-    log.info("\t 999thPctlRequestTime: {}", snapshot.get999thPercentile());
+    log.info("\t avgTimePerRequest: {}", nsToMs(snapshot.getMean()));
+    log.info("\t medianRequestTime: {}", nsToMs(snapshot.getMedian()));
+    log.info("\t 75thPcRequestTime: {}", nsToMs(snapshot.get75thPercentile()));
+    log.info("\t 95thPcRequestTime: {}", nsToMs(snapshot.get95thPercentile()));
+    log.info("\t 99thPcRequestTime: {}", nsToMs(snapshot.get99thPercentile()));
+    log.info("\t 999thPcRequestTime: {}", nsToMs(snapshot.get999thPercentile()));
   }
 
+  private static long nsToMs(double ns) {
+    return TimeUnit.MILLISECONDS.convert((long)ns, TimeUnit.NANOSECONDS);
+  }
+
   private void close(MockZKController mockController) {
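A quick worked check of the nanosecond-to-millisecond conversion above (values are only illustrative): 2,500,000 ns is 2.5 ms; the long-returning helper in the test truncates that to 2, while TimerUtils.nsToMs keeps the fraction.

import java.util.concurrent.TimeUnit;

public class NsToMsCheck {
  public static void main(String[] args) {
    // 1 ms == 1_000_000 ns
    System.out.println(TimeUnit.MILLISECONDS.convert(2_500_000L, TimeUnit.NANOSECONDS)); // 2
    System.out.println(2_500_000d / TimeUnit.MILLISECONDS.toNanos(1));                   // 2.5
  }
}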
@@ -108,8 +108,8 @@ public class RequestHandlersTest extends SolrTestCaseJ4 {
     NamedList updateStats = updateHandler.getStatistics();
     NamedList termStats = termHandler.getStatistics();
 
-    Double updateTime = (Double) updateStats.get("totalTime");
-    Double termTime = (Double) termStats.get("totalTime");
+    Double updateTime = (Double) updateStats.get("avgTimePerRequest");
+    Double termTime = (Double) termStats.get("avgTimePerRequest");
 
     assertFalse("RequestHandlers should not share statistics!", updateTime.equals(termTime));
   }
@@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.util.stats;

import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.junit.Test;

public class TimerUtilsTest extends SolrTestCaseJ4 {

  @Test
  public void testSolrTimerGetSnapshot() {
    // create a timer with up to 100 data points
    final Timer timer = new Timer();
    final int iterations = random().nextInt(100);
    for (int i = 0; i < iterations; ++i) {
      timer.update(random().nextInt(), TimeUnit.NANOSECONDS);
    }
    // obtain timer metrics
    final NamedList<Object> lst = new SimpleOrderedMap<>();
    TimerUtils.addMetrics(lst, timer);
    // check that expected metrics were obtained
    assertEquals(lst.size(), 9);
    final Snapshot snapshot = timer.getSnapshot();
    // cannot test avgRequestsPerMinute directly because mean rate changes as time increases!
    // assertEquals(lst.get("avgRequestsPerMinute"), timer.getMeanRate());
    assertEquals(lst.get("5minRateRequestsPerMinute"), timer.getFiveMinuteRate());
    assertEquals(lst.get("15minRateRequestsPerMinute"), timer.getFifteenMinuteRate());
    assertEquals(lst.get("avgTimePerRequest"), TimerUtils.nsToMs(snapshot.getMean()));
    assertEquals(lst.get("medianRequestTime"), TimerUtils.nsToMs(snapshot.getMedian()));
    assertEquals(lst.get("75thPcRequestTime"), TimerUtils.nsToMs(snapshot.get75thPercentile()));
    assertEquals(lst.get("95thPcRequestTime"), TimerUtils.nsToMs(snapshot.get95thPercentile()));
    assertEquals(lst.get("99thPcRequestTime"), TimerUtils.nsToMs(snapshot.get99thPercentile()));
    assertEquals(lst.get("999thPcRequestTime"), TimerUtils.nsToMs(snapshot.get999thPercentile()));
  }

}
@@ -1 +0,0 @@
1e98427c7f6e53363b598e2943e50903ce4f3657
@@ -0,0 +1 @@
224f03afd2521c6c94632f566beb1bb5ee32cf07
@@ -1 +0,0 @@
bec37e61ebe40bf0f52f3fc8b7df57b5c1773682
@@ -0,0 +1 @@
e32a01aa7ca4070676e22e707272422baa0f7ecd