SOLR-1972: Add extra query stats to RequestHandler

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1428372 13f79535-47bb-0310-9956-ffa450edef68
Alan Woodward 2013-01-03 14:52:55 +00:00
parent 182cb4c2f4
commit 84aa901490
13 changed files with 1459 additions and 20 deletions

View File

@@ -185,6 +185,10 @@ New Features
* SOLR-4230: The new Solr 4 spatial fields now work with the {!geofilt} and
{!bbox} query parsers. The score local-param works too. (David Smiley)
* SOLR-1972: Add extra statistics to RequestHandlers - 5 & 15-minute reqs/sec
rolling averages; median, 75th, 95th, 99th, 99.9th percentile request times
(Alan Woodward, Shawn Heisey, Adrien Grand, Uwe Schindler)
Optimizations
----------------------

View File

@@ -28,28 +28,30 @@ import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.util.SolrPluginUtils;
import org.apache.solr.util.stats.Snapshot;
import org.apache.solr.util.stats.Timer;
import org.apache.solr.util.stats.TimerContext;
import java.net.URL;
import java.util.concurrent.atomic.AtomicLong;
/**
* Common base class for Solr request handlers; tracks request counts and timing statistics.
*/
public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoMBean {
// statistics
// TODO: should we bother synchronizing these, or is an off-by-one error
// acceptable every million requests or so?
volatile long numRequests;
volatile long numErrors;
volatile long numTimeouts;
protected NamedList initArgs = null;
protected SolrParams defaults;
protected SolrParams appends;
protected SolrParams invariants;
volatile long totalTime = 0;
long handlerStart = System.currentTimeMillis();
protected boolean httpCaching = true;
// Statistics
private final AtomicLong numRequests = new AtomicLong();
private final AtomicLong numErrors = new AtomicLong();
private final AtomicLong numTimeouts = new AtomicLong();
private final Timer requestTimes = new Timer();
private final long handlerStart = System.currentTimeMillis();
/**
* Initializes the {@link org.apache.solr.request.SolrRequestHandler} by creating three {@link org.apache.solr.common.params.SolrParams} named defaults, appends and invariants.
@@ -94,7 +96,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
public void init(NamedList args) {
initArgs = args;
// Copied from StandardRequestHandler
if( args != null ) {
Object o = args.get("defaults");
if (o != null && o instanceof NamedList) {
@@ -114,6 +116,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
Object caching = initArgs.get("httpCaching");
httpCaching = caching != null ? Boolean.parseBoolean(caching.toString()) : true;
}
}
public NamedList getInitArgs() {
@@ -124,7 +127,8 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
@Override
public void handleRequest(SolrQueryRequest req, SolrQueryResponse rsp) {
numRequests++;
numRequests.incrementAndGet();
TimerContext timer = requestTimes.time();
try {
SolrPluginUtils.setDefaults(req,defaults,appends,invariants);
rsp.setHttpCaching(httpCaching);
@@ -135,7 +139,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
Object partialResults = header.get("partialResults");
boolean timedOut = partialResults == null ? false : (Boolean)partialResults;
if( timedOut ) {
numTimeouts++;
numTimeouts.incrementAndGet();
rsp.setHttpCaching(false);
}
}
@@ -156,11 +160,12 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
}
rsp.setException(e);
numErrors++;
numErrors.incrementAndGet();
}
finally {
timer.stop();
}
totalTime += rsp.getEndTime() - req.getStartTime();
}
//////////////////////// SolrInfoMBeans methods //////////////////////
@@ -192,13 +197,21 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
@Override
public NamedList<Object> getStatistics() {
NamedList<Object> lst = new SimpleOrderedMap<Object>();
Snapshot snapshot = requestTimes.getSnapshot();
lst.add("handlerStart",handlerStart);
lst.add("requests", numRequests);
lst.add("errors", numErrors);
lst.add("timeouts", numTimeouts);
lst.add("totalTime",totalTime);
lst.add("avgTimePerRequest", (float) totalTime / (float) this.numRequests);
lst.add("avgRequestsPerSecond", (float) numRequests*1000 / (float)(System.currentTimeMillis()-handlerStart));
lst.add("requests", numRequests.longValue());
lst.add("errors", numErrors.longValue());
lst.add("timeouts", numTimeouts.longValue());
lst.add("totalTime", requestTimes.getSum());
lst.add("avgRequestsPerSecond", requestTimes.getMeanRate());
lst.add("5minRateReqsPerSecond", requestTimes.getFiveMinuteRate());
lst.add("15minRateReqsPerSecond", requestTimes.getFifteenMinuteRate());
lst.add("avgTimePerRequest", requestTimes.getMean());
lst.add("medianRequestTime", snapshot.getMedian());
lst.add("75thPcRequestTime", snapshot.get75thPercentile());
lst.add("95thPcRequestTime", snapshot.get95thPercentile());
lst.add("99thPcRequestTime", snapshot.get99thPercentile());
lst.add("999thPcRequestTime", snapshot.get999thPercentile());
return lst;
}
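For illustration, a minimal sketch (not part of the patch) of reading the new statistics from a registered handler, given a SolrCore reference called core; the "/select" name is only an example, while the keys match those added in getStatistics() above:

SolrRequestHandler handler = core.getRequestHandler("/select");
NamedList stats = handler.getStatistics();
Double p95 = (Double) stats.get("95thPcRequestTime");        // 95th percentile request time, in ms
Double rate5m = (Double) stats.get("5minRateReqsPerSecond"); // 5-minute rolling requests/sec
Double mean = (Double) stats.get("avgTimePerRequest");       // mean request time, in ms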

View File

@@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
/**
* An abstraction for how time passes. It is passed to {@link Timer} to track timing.
*/
public abstract class Clock {
/**
* Returns the current time tick.
*
* @return time tick in nanoseconds
*/
public abstract long getTick();
/**
* Returns the current time in milliseconds.
*
* @return time in milliseconds
*/
public long getTime() {
return System.currentTimeMillis();
}
private static final Clock DEFAULT = new UserTimeClock();
/**
* The default clock to use.
*
* @return the default {@link Clock} instance
*
* @see UserTimeClock
*/
public static Clock defaultClock() {
return DEFAULT;
}
/**
* A clock implementation whose ticks come from {@link System#nanoTime()} (a monotonic nanosecond source, not epoch time).
*/
public static class UserTimeClock extends Clock {
@Override
public long getTick() {
return System.nanoTime();
}
}
/**
* A clock implementation which returns the current thread's CPU time.
*/
public static class CpuTimeClock extends Clock {
private static final ThreadMXBean THREAD_MX_BEAN = ManagementFactory.getThreadMXBean();
@Override
public long getTick() {
return THREAD_MX_BEAN.getCurrentThreadCpuTime();
}
}
}
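A brief sketch of the difference between the two concrete clocks; getTick() is the method the timing code uses, and the two variants measure different things:

Clock wall = Clock.defaultClock();        // UserTimeClock: System.nanoTime(), includes time spent waiting
Clock cpu = new Clock.CpuTimeClock();     // CPU time consumed by the current thread only
long start = wall.getTick();
// ... timed work would go here ...
long elapsedNanos = wall.getTick() - start;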

View File

@@ -0,0 +1,127 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static java.lang.Math.exp;
/**
* An exponentially-weighted moving average.
*
* @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg1.pdf">UNIX Load Average Part 1: How
* It Works</a>
* @see <a href="http://www.teamquest.com/pdfs/whitepaper/ldavg2.pdf">UNIX Load Average Part 2: Not
* Your Average Average</a>
*/
public class EWMA {
private static final int INTERVAL = 5;
private static final double SECONDS_PER_MINUTE = 60.0;
private static final int ONE_MINUTE = 1;
private static final int FIVE_MINUTES = 5;
private static final int FIFTEEN_MINUTES = 15;
private static final double M1_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / ONE_MINUTE);
private static final double M5_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / FIVE_MINUTES);
private static final double M15_ALPHA = 1 - exp(-INTERVAL / SECONDS_PER_MINUTE / FIFTEEN_MINUTES);
private volatile boolean initialized = false;
private volatile double rate = 0.0;
private final AtomicLong uncounted = new AtomicLong();
private final double alpha, interval;
/**
* Creates a new EWMA which is equivalent to the UNIX one minute load average and which expects
* to be ticked every 5 seconds.
*
* @return a one-minute EWMA
*/
public static EWMA oneMinuteEWMA() {
return new EWMA(M1_ALPHA, INTERVAL, TimeUnit.SECONDS);
}
/**
* Creates a new EWMA which is equivalent to the UNIX five minute load average and which expects
* to be ticked every 5 seconds.
*
* @return a five-minute EWMA
*/
public static EWMA fiveMinuteEWMA() {
return new EWMA(M5_ALPHA, INTERVAL, TimeUnit.SECONDS);
}
/**
* Creates a new EWMA which is equivalent to the UNIX fifteen minute load average and which
* expects to be ticked every 5 seconds.
*
* @return a fifteen-minute EWMA
*/
public static EWMA fifteenMinuteEWMA() {
return new EWMA(M15_ALPHA, INTERVAL, TimeUnit.SECONDS);
}
/**
* Create a new EWMA with a specific smoothing constant.
*
* @param alpha the smoothing constant
* @param interval the expected tick interval
* @param intervalUnit the time unit of the tick interval
*/
public EWMA(double alpha, long interval, TimeUnit intervalUnit) {
this.interval = intervalUnit.toNanos(interval);
this.alpha = alpha;
}
/**
* Update the moving average with a new value.
*
* @param n the new value
*/
public void update(long n) {
uncounted.addAndGet(n);
}
/**
* Mark the passage of time and decay the current rate accordingly.
*/
public void tick() {
final long count = uncounted.getAndSet(0);
final double instantRate = count / interval;
if (initialized) {
rate += (alpha * (instantRate - rate));
} else {
rate = instantRate;
initialized = true;
}
}
/**
* Returns the rate in the given units of time.
*
* @param rateUnit the unit of time
* @return the rate
*/
public double getRate(TimeUnit rateUnit) {
return rate * (double) rateUnit.toNanos(1);
}
}
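A short usage sketch, mirroring how the Meter class later in this commit drives an EWMA; the numbers are only illustrative:

EWMA m5 = EWMA.fiveMinuteEWMA();                  // alpha = 1 - exp(-5/60/5), roughly 0.0165
m5.update(10);                                    // 10 events observed since the last tick
m5.tick();                                        // expected every 5 seconds; decays the rate
double perSecond = m5.getRate(TimeUnit.SECONDS);  // smoothed events per second (2.0 after this first tick)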

View File

@@ -0,0 +1,219 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static java.lang.Math.exp;
import static java.lang.Math.min;
/**
* An exponentially-decaying random sample of {@code long}s. Uses Cormode et al's forward-decaying
* priority reservoir sampling method to produce a statistically representative sample,
* exponentially biased towards newer entries.
*
* See <a href="http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf">
* Cormode et al. Forward Decay: A Practical Time Decay Model for Streaming Systems. ICDE '09: Proceedings of the 2009 IEEE International Conference on Data Engineering (2009)</a>
*/
public class ExponentiallyDecayingSample implements Sample {
private static final long RESCALE_THRESHOLD = TimeUnit.HOURS.toNanos(1);
private final ConcurrentSkipListMap<Double, Long> values;
private final ReentrantReadWriteLock lock;
private final double alpha;
private final int reservoirSize;
private final AtomicLong count = new AtomicLong(0);
private volatile long startTime;
private final AtomicLong nextScaleTime = new AtomicLong(0);
private final Clock clock;
// TODO: Maybe replace this with a Mersenne Twister?
private final Random random = new Random();
/**
* Creates a new {@link ExponentiallyDecayingSample}.
*
* @param reservoirSize the number of samples to keep in the sampling reservoir
* @param alpha the exponential decay factor; the higher this is, the more biased the
* sample will be towards newer values
*/
public ExponentiallyDecayingSample(int reservoirSize, double alpha) {
this(reservoirSize, alpha, Clock.defaultClock());
}
/**
* Creates a new {@link ExponentiallyDecayingSample}.
*
* @param reservoirSize the number of samples to keep in the sampling reservoir
* @param alpha the exponential decay factor; the higher this is, the more biased the
* sample will be towards newer values
* @param clock the clock used to timestamp samples and track rescale timing
*/
public ExponentiallyDecayingSample(int reservoirSize, double alpha, Clock clock) {
this.values = new ConcurrentSkipListMap<Double, Long>();
this.lock = new ReentrantReadWriteLock();
this.alpha = alpha;
this.reservoirSize = reservoirSize;
this.clock = clock;
clear();
}
@Override
public void clear() {
lockForRescale();
try {
values.clear();
count.set(0);
this.startTime = currentTimeInSeconds();
nextScaleTime.set(clock.getTick() + RESCALE_THRESHOLD);
} finally {
unlockForRescale();
}
}
@Override
public int size() {
return (int) min(reservoirSize, count.get());
}
@Override
public void update(long value) {
update(value, currentTimeInSeconds());
}
/**
* Adds an old value with a fixed timestamp to the sample.
*
* @param value the value to be added
* @param timestamp the epoch timestamp of {@code value} in seconds
*/
public void update(long value, long timestamp) {
rescaleIfNeeded();
lockForRegularUsage();
try {
final double priority = weight(timestamp - startTime) / random.nextDouble();
final long newCount = count.incrementAndGet();
if (newCount <= reservoirSize) {
values.put(priority, value);
} else {
Double first = values.firstKey();
if (first < priority) {
if (values.putIfAbsent(priority, value) == null) {
// ensure we always remove an item
while (values.remove(first) == null) {
first = values.firstKey();
}
}
}
}
} finally {
unlockForRegularUsage();
}
}
private void rescaleIfNeeded() {
final long now = clock.getTick();
final long next = nextScaleTime.get();
if (now >= next) {
rescale(now, next);
}
}
@Override
public Snapshot getSnapshot() {
lockForRegularUsage();
try {
return new Snapshot(values.values());
} finally {
unlockForRegularUsage();
}
}
private long currentTimeInSeconds() {
return TimeUnit.MILLISECONDS.toSeconds(clock.getTime());
}
private double weight(long t) {
return exp(alpha * t);
}
/* "A common feature of the above techniques—indeed, the key technique that
* allows us to track the decayed weights efficientlyis that they maintain
* counts and other quantities based on g(ti L), and only scale by g(t L)
* at query time. But while g(ti L)/g(tL) is guaranteed to lie between zero
* and one, the intermediate values of g(ti L) could become very large. For
* polynomial functions, these values should not grow too large, and should be
* effectively represented in practice by floating point values without loss of
* precision. For exponential functions, these values could grow quite large as
* new values of (ti L) become large, and potentially exceed the capacity of
* common floating point types. However, since the values stored by the
* algorithms are linear combinations of g values (scaled sums), they can be
* rescaled relative to a new landmark. That is, by the analysis of exponential
* decay in Section III-A, the choice of L does not affect the final result. We
* can therefore multiply each value based on L by a factor of exp(α(L L)),
* and obtain the correct value as if we had instead computed relative to a new
* landmark L (and then use this new L at query time). This can be done with
* a linear pass over whatever data structure is being used."
*/
private void rescale(long now, long next) {
if (nextScaleTime.compareAndSet(next, now + RESCALE_THRESHOLD)) {
lockForRescale();
try {
final long oldStartTime = startTime;
this.startTime = currentTimeInSeconds();
final ArrayList<Double> keys = new ArrayList<Double>(values.keySet());
for (Double key : keys) {
final Long value = values.remove(key);
values.put(key * exp(-alpha * (startTime - oldStartTime)), value);
}
// make sure the counter is in sync with the number of stored samples.
count.set(values.size());
} finally {
unlockForRescale();
}
}
}
private void unlockForRescale() {
lock.writeLock().unlock();
}
private void lockForRescale() {
lock.writeLock().lock();
}
private void lockForRegularUsage() {
lock.readLock().lock();
}
private void unlockForRegularUsage() {
lock.readLock().unlock();
}
}
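A minimal sketch of the reservoir in use; 1028 and 0.015 are the same defaults that Histogram.SampleType.BIASED uses below, biasing the sample towards roughly the last five minutes of values:

ExponentiallyDecayingSample sample = new ExponentiallyDecayingSample(1028, 0.015);
sample.update(1200000L);                 // e.g. request durations in nanoseconds
sample.update(3400000L);
sample.update(98000000L);
Snapshot snap = sample.getSnapshot();
double medianNanos = snap.getMedian();   // 3400000.0 for these three values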

View File

@@ -0,0 +1,239 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static java.lang.Math.sqrt;
/**
* A metric which calculates the distribution of a value.
*
* @see <a href="http://www.johndcook.com/standard_deviation.html">Accurately computing running
* variance</a>
*/
public class Histogram {
private static final int DEFAULT_SAMPLE_SIZE = 1028;
private static final double DEFAULT_ALPHA = 0.015;
/**
* The type of sampling the histogram should be performing.
*/
enum SampleType {
/**
* Uses a uniform sample of 1028 elements, which offers a 99.9% confidence level with a 5%
* margin of error assuming a normal distribution.
*/
UNIFORM {
@Override
public Sample newSample() {
return new UniformSample(DEFAULT_SAMPLE_SIZE);
}
},
/**
* Uses an exponentially decaying sample of 1028 elements, which offers a 99.9% confidence
* level with a 5% margin of error assuming a normal distribution, and an alpha factor of
* 0.015, which heavily biases the sample to the past 5 minutes of measurements.
*/
BIASED {
@Override
public Sample newSample() {
return new ExponentiallyDecayingSample(DEFAULT_SAMPLE_SIZE, DEFAULT_ALPHA);
}
};
public abstract Sample newSample();
}
private final Sample sample;
private final AtomicLong min = new AtomicLong();
private final AtomicLong max = new AtomicLong();
private final AtomicLong sum = new AtomicLong();
// These are for the Welford algorithm for calculating running variance
// without floating-point doom.
private final AtomicReference<double[]> variance =
new AtomicReference<double[]>(new double[]{-1, 0}); // M, S
private final AtomicLong count = new AtomicLong();
/**
* Creates a new {@link Histogram} with the given sample type.
*
* @param type the type of sample to use
*/
Histogram(SampleType type) {
this(type.newSample());
}
/**
* Creates a new {@link Histogram} with the given sample.
*
* @param sample the sample to create a histogram from
*/
Histogram(Sample sample) {
this.sample = sample;
clear();
}
/**
* Clears all recorded values.
*/
public void clear() {
sample.clear();
count.set(0);
max.set(Long.MIN_VALUE);
min.set(Long.MAX_VALUE);
sum.set(0);
variance.set(new double[]{ -1, 0 });
}
/**
* Adds a recorded value.
*
* @param value the length of the value
*/
public void update(int value) {
update((long) value);
}
/**
* Adds a recorded value.
*
* @param value the length of the value
*/
public void update(long value) {
count.incrementAndGet();
sample.update(value);
setMax(value);
setMin(value);
sum.getAndAdd(value);
updateVariance(value);
}
/**
* Returns the number of values recorded.
*
* @return the number of values recorded
*/
public long getCount() {
return count.get();
}
/* (non-Javadoc)
* @see com.yammer.metrics.core.Summarizable#max()
*/
public double getMax() {
if (getCount() > 0) {
return max.get();
}
return 0.0;
}
/* (non-Javadoc)
* @see com.yammer.metrics.core.Summarizable#min()
*/
public double getMin() {
if (getCount() > 0) {
return min.get();
}
return 0.0;
}
/* (non-Javadoc)
* @see com.yammer.metrics.core.Summarizable#mean()
*/
public double getMean() {
if (getCount() > 0) {
return sum.get() / (double) getCount();
}
return 0.0;
}
/* (non-Javadoc)
* @see com.yammer.metrics.core.Summarizable#stdDev()
*/
public double getStdDev() {
if (getCount() > 0) {
return sqrt(getVariance());
}
return 0.0;
}
/* (non-Javadoc)
* @see com.yammer.metrics.core.Summarizable#sum()
*/
public double getSum() {
return (double) sum.get();
}
public Snapshot getSnapshot() {
return sample.getSnapshot();
}
private double getVariance() {
if (getCount() <= 1) {
return 0.0;
}
return variance.get()[1] / (getCount() - 1);
}
private void setMax(long potentialMax) {
boolean done = false;
while (!done) {
final long currentMax = max.get();
done = currentMax >= potentialMax || max.compareAndSet(currentMax, potentialMax);
}
}
private void setMin(long potentialMin) {
boolean done = false;
while (!done) {
final long currentMin = min.get();
done = currentMin <= potentialMin || min.compareAndSet(currentMin, potentialMin);
}
}
private void updateVariance(long value) {
while (true) {
final double[] oldValues = variance.get();
final double[] newValues = new double[2];
if (oldValues[0] == -1) {
newValues[0] = value;
newValues[1] = 0;
} else {
final double oldM = oldValues[0];
final double oldS = oldValues[1];
final double newM = oldM + ((value - oldM) / getCount());
final double newS = oldS + ((value - oldM) * (value - newM));
newValues[0] = newM;
newValues[1] = newS;
}
if (variance.compareAndSet(oldValues, newValues)) {
return;
}
}
}
}
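A worked example of the Welford update above; since the constructor and SampleType enum are package-private, this sketch assumes code in the same org.apache.solr.util.stats package:

Histogram h = new Histogram(Histogram.SampleType.UNIFORM);
h.update(2);
h.update(4);
h.update(9);
// running (M, S) pairs: (2, 0) -> (3, 2) -> (5, 26)
double mean = h.getMean();     // 15 / 3 = 5.0
double stdDev = h.getStdDev(); // sqrt(26 / (3 - 1)) = sqrt(13), about 3.61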

View File

@@ -0,0 +1,144 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* A meter metric which measures mean throughput and one-, five-, and fifteen-minute
* exponentially-weighted moving average throughputs.
*
* @see <a href="http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average">EMA</a>
*/
public class Meter {
private static final long TICK_INTERVAL = TimeUnit.SECONDS.toNanos(5);
private final EWMA m1Rate = EWMA.oneMinuteEWMA();
private final EWMA m5Rate = EWMA.fiveMinuteEWMA();
private final EWMA m15Rate = EWMA.fifteenMinuteEWMA();
private final AtomicLong count = new AtomicLong();
private final long startTime;
private final AtomicLong lastTick;
private final TimeUnit rateUnit;
private final String eventType;
private final Clock clock;
/**
* Creates a new {@link Meter}.
*
* @param eventType the plural name of the event the meter is measuring (e.g., {@code
* "requests"})
* @param rateUnit the rate unit of the new meter
* @param clock the clock to use for the meter ticks
*/
Meter(String eventType, TimeUnit rateUnit, Clock clock) {
this.rateUnit = rateUnit;
this.eventType = eventType;
this.clock = clock;
this.startTime = this.clock.getTick();
this.lastTick = new AtomicLong(startTime);
}
public TimeUnit getRateUnit() {
return rateUnit;
}
public String getEventType() {
return eventType;
}
/**
* Updates the moving averages.
*/
void tick() {
m1Rate.tick();
m5Rate.tick();
m15Rate.tick();
}
/**
* Mark the occurrence of an event.
*/
public void mark() {
mark(1);
}
/**
* Mark the occurrence of a given number of events.
*
* @param n the number of events
*/
public void mark(long n) {
tickIfNecessary();
count.addAndGet(n);
m1Rate.update(n);
m5Rate.update(n);
m15Rate.update(n);
}
private void tickIfNecessary() {
final long oldTick = lastTick.get();
final long newTick = clock.getTick();
final long age = newTick - oldTick;
if (age > TICK_INTERVAL && lastTick.compareAndSet(oldTick, newTick)) {
final long requiredTicks = age / TICK_INTERVAL;
for (long i = 0; i < requiredTicks; i++) {
tick();
}
}
}
public long getCount() {
return count.get();
}
public double getFifteenMinuteRate() {
tickIfNecessary();
return m15Rate.getRate(rateUnit);
}
public double getFiveMinuteRate() {
tickIfNecessary();
return m5Rate.getRate(rateUnit);
}
public double getMeanRate() {
if (getCount() == 0) {
return 0.0;
} else {
final long elapsed = (clock.getTick() - startTime);
return convertNsRate(getCount() / (double) elapsed);
}
}
public double getOneMinuteRate() {
tickIfNecessary();
return m1Rate.getRate(rateUnit);
}
private double convertNsRate(double ratePerNs) {
return ratePerNs * (double) rateUnit.toNanos(1);
}
}
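A small sketch of the Meter API; its constructor is package-private, so outside this package a Meter is normally reached through a Timer:

Meter requests = new Meter("requests", TimeUnit.SECONDS, Clock.defaultClock());
requests.mark();      // one event
requests.mark(5);     // five more; tickIfNecessary() also catches up any missed 5-second ticks
double recent = requests.getFiveMinuteRate();  // exponentially weighted events per second
double mean = requests.getMeanRate();          // total count divided by elapsed time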

View File

@@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
/**
* A statistically representative sample of a data stream.
*/
public interface Sample {
/**
* Clears all recorded values.
*/
void clear();
/**
* Returns the number of values recorded.
*
* @return the number of values recorded
*/
int size();
/**
* Adds a new recorded value to the sample.
*
* @param value a new recorded value
*/
void update(long value);
/**
* Returns a snapshot of the sample's values.
*
* @return a snapshot of the sample's values
*/
Snapshot getSnapshot();
}

View File

@@ -0,0 +1,169 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.Arrays;
import java.util.Collection;
import static java.lang.Math.floor;
/**
* A statistical snapshot of a {@link Sample}.
*/
public class Snapshot {
private static final double MEDIAN_Q = 0.5;
private static final double P75_Q = 0.75;
private static final double P95_Q = 0.95;
private static final double P98_Q = 0.98;
private static final double P99_Q = 0.99;
private static final double P999_Q = 0.999;
private final double[] values;
/**
* Create a new {@link Snapshot} with the given values.
*
* @param values an unordered set of values in the sample
*/
public Snapshot(Collection<Long> values) {
final Object[] copy = values.toArray();
this.values = new double[copy.length];
for (int i = 0; i < copy.length; i++) {
this.values[i] = (Long) copy[i];
}
Arrays.sort(this.values);
}
/**
* Create a new {@link Snapshot} with the given values.
*
* @param values an unordered set of values in the sample
*/
public Snapshot(double[] values) {
this.values = new double[values.length];
System.arraycopy(values, 0, this.values, 0, values.length);
Arrays.sort(this.values);
}
/**
* Returns the value at the given quantile.
*
* @param quantile a given quantile, in {@code [0..1]}
* @return the value in the distribution at {@code quantile}
*/
public double getValue(double quantile) {
if (quantile < 0.0 || quantile > 1.0) {
throw new IllegalArgumentException(quantile + " is not in [0..1]");
}
if (values.length == 0) {
return 0.0;
}
final double pos = quantile * (values.length + 1);
if (pos < 1) {
return values[0];
}
if (pos >= values.length) {
return values[values.length - 1];
}
final double lower = values[(int) pos - 1];
final double upper = values[(int) pos];
return lower + (pos - floor(pos)) * (upper - lower);
}
/**
* Returns the number of values in the snapshot.
*
* @return the number of values in the snapshot
*/
public int size() {
return values.length;
}
/**
* Returns the median value in the distribution.
*
* @return the median value in the distribution
*/
public double getMedian() {
return getValue(MEDIAN_Q);
}
/**
* Returns the value at the 75th percentile in the distribution.
*
* @return the value at the 75th percentile in the distribution
*/
public double get75thPercentile() {
return getValue(P75_Q);
}
/**
* Returns the value at the 95th percentile in the distribution.
*
* @return the value at the 95th percentile in the distribution
*/
public double get95thPercentile() {
return getValue(P95_Q);
}
/**
* Returns the value at the 98th percentile in the distribution.
*
* @return the value at the 98th percentile in the distribution
*/
public double get98thPercentile() {
return getValue(P98_Q);
}
/**
* Returns the value at the 99th percentile in the distribution.
*
* @return the value at the 99th percentile in the distribution
*/
public double get99thPercentile() {
return getValue(P99_Q);
}
/**
* Returns the value at the 99.9th percentile in the distribution.
*
* @return the value at the 99.9th percentile in the distribution
*/
public double get999thPercentile() {
return getValue(P999_Q);
}
/**
* Returns the entire set of values in the snapshot.
*
* @return the entire set of values in the snapshot
*/
public double[] getValues() {
return Arrays.copyOf(values, values.length);
}
}
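A worked example of the interpolation in getValue() for five sorted values:

Snapshot s = new Snapshot(new double[] {1, 2, 3, 4, 5});
double median = s.getMedian();        // pos = 0.5 * 6 = 3.0  -> values[2] = 3.0
double p75 = s.get75thPercentile();   // pos = 0.75 * 6 = 4.5 -> 4 + 0.5 * (5 - 4) = 4.5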

View File

@@ -0,0 +1,204 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import org.apache.solr.util.stats.Histogram.SampleType;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
* A timer metric which aggregates timing durations and provides duration statistics, plus
* throughput statistics via {@link Meter}.
*/
public class Timer {
private final TimeUnit durationUnit, rateUnit;
private final Meter meter;
private final Histogram histogram = new Histogram(SampleType.BIASED);
private final Clock clock;
public Timer() {
this(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, Clock.defaultClock());
}
/**
* Creates a new {@link Timer}.
*
* @param durationUnit the scale unit for this timer's duration metrics
* @param rateUnit the scale unit for this timer's rate metrics
* @param clock the clock used to calculate duration
*/
public Timer(TimeUnit durationUnit, TimeUnit rateUnit, Clock clock) {
this.durationUnit = durationUnit;
this.rateUnit = rateUnit;
this.meter = new Meter("calls", rateUnit, clock);
this.clock = clock;
clear();
}
/**
* Returns the timer's duration scale unit.
*
* @return the timer's duration scale unit
*/
public TimeUnit getDurationUnit() {
return durationUnit;
}
public TimeUnit getRateUnit() {
return rateUnit;
}
/**
* Clears all recorded durations.
*/
public void clear() {
histogram.clear();
}
/**
* Adds a recorded duration.
*
* @param duration the length of the duration
* @param unit the scale unit of {@code duration}
*/
public void update(long duration, TimeUnit unit) {
update(unit.toNanos(duration));
}
/**
* Times and records the duration of an event.
*
* @param event a {@link Callable} whose {@link Callable#call()} method implements a process
* whose duration should be timed
* @param <T> the type of the value returned by {@code event}
* @return the value returned by {@code event}
* @throws Exception if {@code event} throws an {@link Exception}
*/
public <T> T time(Callable<T> event) throws Exception {
final long startTime = clock.getTick();
try {
return event.call();
} finally {
update(clock.getTick() - startTime);
}
}
/**
* Returns a timing {@link TimerContext}, which measures an elapsed time in nanoseconds.
*
* @return a new {@link TimerContext}
*/
public TimerContext time() {
return new TimerContext(this, clock);
}
public long getCount() {
return histogram.getCount();
}
public double getFifteenMinuteRate() {
return meter.getFifteenMinuteRate();
}
public double getFiveMinuteRate() {
return meter.getFiveMinuteRate();
}
public double getMeanRate() {
return meter.getMeanRate();
}
public double getOneMinuteRate() {
return meter.getOneMinuteRate();
}
/**
* Returns the longest recorded duration.
*
* @return the longest recorded duration
*/
public double getMax() {
return convertFromNS(histogram.getMax());
}
/**
* Returns the shortest recorded duration.
*
* @return the shortest recorded duration
*/
public double getMin() {
return convertFromNS(histogram.getMin());
}
/**
* Returns the arithmetic mean of all recorded durations.
*
* @return the arithmetic mean of all recorded durations
*/
public double getMean() {
return convertFromNS(histogram.getMean());
}
/**
* Returns the standard deviation of all recorded durations.
*
* @return the standard deviation of all recorded durations
*/
public double getStdDev() {
return convertFromNS(histogram.getStdDev());
}
/**
* Returns the sum of all recorded durations.
*
* @return the sum of all recorded durations
*/
public double getSum() {
return convertFromNS(histogram.getSum());
}
public Snapshot getSnapshot() {
final double[] values = histogram.getSnapshot().getValues();
final double[] converted = new double[values.length];
for (int i = 0; i < values.length; i++) {
converted[i] = convertFromNS(values[i]);
}
return new Snapshot(converted);
}
public String getEventType() {
return meter.getEventType();
}
private void update(long duration) {
if (duration >= 0) {
histogram.update(duration);
meter.mark();
}
}
private double convertFromNS(double ns) {
return ns / TimeUnit.NANOSECONDS.convert(1, durationUnit);
}
}
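A minimal sketch of the two ways to time work with this class; doWork() is a stand-in for any workload, and the default constructor reports durations in milliseconds and rates per second:

Timer timer = new Timer();
TimerContext ctx = timer.time();
try {
  doWork();                     // hypothetical workload
} finally {
  ctx.stop();                   // records the elapsed nanoseconds into the histogram and meter
}
double meanMs = timer.getMean();                          // mean duration, in ms
double p99Ms = timer.getSnapshot().get99thPercentile();   // 99th percentile duration, in ms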

View File

@@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.concurrent.TimeUnit;
/**
* A timing context.
*
* @see Timer#time()
*/
public class TimerContext {
private final Timer timer;
private final Clock clock;
private final long startTime;
/**
* Creates a new {@link TimerContext} with the current time as its starting value and with the
* given {@link Timer}.
*
* @param timer the {@link Timer} to report the elapsed time to
* @param clock the {@link Clock} used to measure the elapsed time
*/
TimerContext(Timer timer, Clock clock) {
this.timer = timer;
this.clock = clock;
this.startTime = clock.getTick();
}
/**
* Stops recording the elapsed time, updates the timer, and returns the elapsed time in nanoseconds.
*/
public long stop() {
final long elapsedNanos = clock.getTick() - startTime;
timer.update(elapsedNanos, TimeUnit.NANOSECONDS);
return elapsedNanos;
}
}

View File

@@ -0,0 +1,109 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Forked from https://github.com/codahale/metrics
*/
package org.apache.solr.util.stats;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;
/**
* A random sample of a stream of {@code long}s. Uses Vitter's Algorithm R to produce a
* statistically representative sample.
*
* @see <a href="http://www.cs.umd.edu/~samir/498/vitter.pdf">Random Sampling with a Reservoir</a>
*/
public class UniformSample implements Sample {
private static final int BITS_PER_LONG = 63;
private final AtomicLong count = new AtomicLong();
private final AtomicLongArray values;
//TODO: Maybe replace with a Mersenne twister for better distribution
private static final Random random = new Random();
/**
* Creates a new {@link UniformSample}.
*
* @param reservoirSize the number of samples to keep in the sampling reservoir
*/
public UniformSample(int reservoirSize) {
this.values = new AtomicLongArray(reservoirSize);
clear();
}
@Override
public void clear() {
for (int i = 0; i < values.length(); i++) {
values.set(i, 0);
}
count.set(0);
}
@Override
public int size() {
final long c = count.get();
if (c > values.length()) {
return values.length();
}
return (int) c;
}
@Override
public void update(long value) {
final long c = count.incrementAndGet();
if (c <= values.length()) {
values.set((int) c - 1, value);
} else {
final long r = nextLong(c);
if (r < values.length()) {
values.set((int) r, value);
}
}
}
/**
* Get a pseudo-random long uniformly between 0 and n-1. Stolen from
* {@link java.util.Random#nextInt()}.
*
* @param n the bound
* @return a value selected randomly from the range {@code [0..n)}.
*/
private static long nextLong(long n) {
long bits, val;
do {
bits = random.nextLong() & (~(1L << BITS_PER_LONG));
val = bits % n;
} while (bits - val + (n - 1) < 0L);
return val;
}
@Override
public Snapshot getSnapshot() {
final int s = size();
final List<Long> copy = new ArrayList<Long>(s);
for (int i = 0; i < s; i++) {
copy.add(values.get(i));
}
return new Snapshot(copy);
}
}
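A brief sketch of Algorithm R in action with a reservoir of three slots; once the reservoir is full, each new value replaces a random slot with probability reservoirSize / count:

UniformSample sample = new UniformSample(3);
for (long v = 1; v <= 100; v++) {
  sample.update(v);         // values after the third displace earlier entries at random
}
int kept = sample.size();   // 3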

View File

@@ -18,6 +18,7 @@
package org.apache.solr.core;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.handler.StandardRequestHandler;
import org.apache.solr.request.SolrRequestHandler;
import org.junit.BeforeClass;
@@ -87,4 +88,23 @@ public class RequestHandlersTest extends SolrTestCaseJ4 {
assertNull( core.getRequestHandler("/update/asdgadsgas" ) ); // prefix
}
@Test
public void testStatistics() {
SolrCore core = h.getCore();
SolrRequestHandler updateHandler = core.getRequestHandler("/update");
SolrRequestHandler termHandler = core.getRequestHandler("/terms");
assertU(adoc("id", "47",
"text", "line up and fly directly at the enemy death cannons, clogging them with wreckage!"));
assertU(commit());
NamedList updateStats = updateHandler.getStatistics();
NamedList termStats = termHandler.getStatistics();
Double updateTime = (Double) updateStats.get("totalTime");
Double termTime = (Double) termStats.get("totalTime");
assertFalse("RequestHandlers should not share statistics!", updateTime.equals(termTime));
}
}