Use System.nanoTime when measuring elapsed time
System.currentTimeMillis is vulnerable to system clock shifts, whereas System.nanoTime is monotonic and safe for measuring elapsed time. Closes #11058
parent cc3f02c0a0
commit 08f7caa56b
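The pattern applied throughout the patch is the same everywhere: take a starting reading from System.nanoTime(), compute elapsed time as a nanosecond delta, and convert to milliseconds only at the edges (logging, metrics, timeouts). A minimal, self-contained sketch of that pattern follows; the class name and printed message are illustrative and not part of the patch, and TimeUnit.NANOSECONDS.toMillis stands in here for the TimeValue.nsecToMSec helper the patch adds.

import java.util.concurrent.TimeUnit;

// Illustrative only: shows the elapsed-time pattern this commit switches to.
public class ElapsedTimeSketch {

    public static void main(String[] args) throws InterruptedException {
        // System.currentTimeMillis() follows the wall clock, which can jump backwards
        // or forwards (NTP corrections, manual adjustments), so deltas computed from it
        // can be negative or wildly wrong.
        // System.nanoTime() is monotonic within a JVM and is only meaningful as a delta.
        long startNS = System.nanoTime();

        Thread.sleep(50); // stand-in for the work being timed

        long elapsedNS = System.nanoTime() - startNS;
        // Convert at the boundary, the same way TimeValue.nsecToMSec(ns) does (ns / 1000000).
        long elapsedMS = TimeUnit.NANOSECONDS.toMillis(elapsedNS);
        System.out.println("took " + elapsedMS + " ms");
    }
}

Because nanoTime readings are only meaningful as differences within the same JVM, the conversion to milliseconds stays at the reporting boundary; the timing state itself is kept in nanoseconds.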
@@ -101,7 +101,7 @@ public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
     protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
         int totalNumDocs = merge.totalNumDocs();
         long totalSizeInBytes = merge.totalBytesSize();
-        long time = System.currentTimeMillis();
+        long timeNS = System.nanoTime();
         currentMerges.inc();
         currentMergesNumDocs.inc(totalNumDocs);
         currentMergesSizeInBytes.inc(totalSizeInBytes);
@@ -116,7 +116,7 @@ public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
             beforeMerge(onGoingMerge);
             super.doMerge(writer, merge);
         } finally {
-            long took = System.currentTimeMillis() - time;
+            long tookMS = TimeValue.nsecToMSec(System.nanoTime() - timeNS);

             onGoingMerges.remove(onGoingMerge);
             afterMerge(onGoingMerge);
@@ -127,10 +127,10 @@ public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {

             totalMergesNumDocs.inc(totalNumDocs);
             totalMergesSizeInBytes.inc(totalSizeInBytes);
-            totalMerges.inc(took);
+            totalMerges.inc(tookMS);

-            long stoppedMS = merge.rateLimiter.getTotalStoppedNS()/1000000;
-            long throttledMS = merge.rateLimiter.getTotalPausedNS()/1000000;
+            long stoppedMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalStoppedNS());
+            long throttledMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalPausedNS());

             totalMergeStoppedTime.inc(stoppedMS);
             totalMergeThrottledTime.inc(throttledMS);
@@ -138,7 +138,7 @@ public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
             String message = String.format(Locale.ROOT,
                     "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], [%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]",
                     merge.info == null ? "_na_" : merge.info.info.name,
-                    TimeValue.timeValueMillis(took),
+                    TimeValue.timeValueMillis(tookMS),
                     totalSizeInBytes/1024f/1024f,
                     totalNumDocs,
                     TimeValue.timeValueMillis(stoppedMS),
@@ -146,7 +146,7 @@ public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
                     merge.rateLimiter.getTotalBytesWritten()/1024f/1024f,
                     merge.rateLimiter.getMBPerSec());

-            if (took > 20000) { // if more than 20 seconds, DEBUG log it
+            if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it
                 logger.debug(message);
             } else if (logger.isTraceEnabled()) {
                 logger.trace(message);
@@ -67,7 +67,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati
     @Override
     protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
         if (request.waitForEvents() != null) {
-            final long endTime = System.currentTimeMillis() + request.timeout().millis();
+            final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
             clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
@@ -76,7 +76,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati

                 @Override
                 public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    final long timeoutInMillis = Math.max(0, endTime - System.currentTimeMillis());
+                    final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime()));
                     final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis);
                     request.timeout(newTimeout);
                     executeHealth(request, listener);
@@ -74,7 +74,7 @@ public class UpdateHelper extends AbstractComponent {
      */
    @SuppressWarnings("unchecked")
    public Result prepare(UpdateRequest request, IndexShard indexShard) {
-        long getDate = System.currentTimeMillis();
+        long getDateNS = System.nanoTime();
        final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
                new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
@@ -222,7 +222,7 @@ public class UpdateHelper extends AbstractComponent {
        if (ttl == null) {
            ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
            if (ttl != null) {
-                ttl = ttl - (System.currentTimeMillis() - getDate); // It is an approximation of exact TTL value, could be improved
+                ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved
            }
        }

@@ -51,7 +51,7 @@ public class ClusterStateObserver {
    final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener();
    // observingContext is not null when waiting on cluster state changes
    final AtomicReference<ObservingContext> observingContext = new AtomicReference<ObservingContext>(null);
-    volatile Long startTime;
+    volatile Long startTimeNS;
    volatile boolean timedOut;


@@ -69,7 +69,7 @@ public class ClusterStateObserver {
        this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state()));
        this.timeOutValue = timeout;
        if (timeOutValue != null) {
-            this.startTime = System.currentTimeMillis();
+            this.startTimeNS = System.nanoTime();
        }
        this.logger = logger;
    }
@@ -111,15 +111,15 @@ public class ClusterStateObserver {
            throw new ElasticsearchException("already waiting for a cluster state change");
        }

-        Long timeoutTimeLeft;
+        Long timeoutTimeLeftMS;
        if (timeOutValue == null) {
            timeOutValue = this.timeOutValue;
            if (timeOutValue != null) {
-                long timeSinceStart = System.currentTimeMillis() - startTime;
-                timeoutTimeLeft = timeOutValue.millis() - timeSinceStart;
-                if (timeoutTimeLeft <= 0l) {
+                long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
+                timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS;
+                if (timeoutTimeLeftMS <= 0l) {
                    // things have timeout while we were busy -> notify
-                    logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart));
+                    logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
                    // update to latest, in case people want to retry
                    timedOut = true;
                    lastObservedState.set(new ObservedState(clusterService.state()));
@@ -127,12 +127,12 @@ public class ClusterStateObserver {
                    return;
                }
            } else {
-                timeoutTimeLeft = null;
+                timeoutTimeLeftMS = null;
            }
        } else {
-            this.startTime = System.currentTimeMillis();
+            this.startTimeNS = System.nanoTime();
            this.timeOutValue = timeOutValue;
-            timeoutTimeLeft = timeOutValue.millis();
+            timeoutTimeLeftMS = timeOutValue.millis();
            timedOut = false;
        }

@@ -150,7 +150,7 @@ public class ClusterStateObserver {
            if (!observingContext.compareAndSet(null, context)) {
                throw new ElasticsearchException("already waiting for a cluster state change");
            }
-            clusterService.add(timeoutTimeLeft == null ? null : new TimeValue(timeoutTimeLeft), clusterStateListener);
+            clusterService.add(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener);
        }
    }

@@ -230,8 +230,8 @@ public class ClusterStateObserver {
            ObservingContext context = observingContext.getAndSet(null);
            if (context != null) {
                clusterService.remove(this);
-                long timeSinceStart = System.currentTimeMillis() - startTime;
-                logger.debug("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart));
+                long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
+                logger.debug("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
                // update to latest, in case people want to retry
                lastObservedState.set(new ObservedState(clusterService.state()));
                timedOut = true;
@@ -130,7 +130,7 @@ public class DiskThresholdDecider extends AllocationDecider {
     */
    class DiskListener implements ClusterInfoService.Listener {
        private final Client client;
-        private long lastRun;
+        private long lastRunNS;

        DiskListener(Client client) {
            this.client = client;
@@ -168,8 +168,8 @@ public class DiskThresholdDecider extends AllocationDecider {
                warnAboutDiskIfNeeded(entry);
                if (entry.getFreeBytes() < DiskThresholdDecider.this.freeBytesThresholdHigh.bytes() ||
                        entry.getFreeDiskAsPercentage() < DiskThresholdDecider.this.freeDiskThresholdHigh) {
-                    if ((System.currentTimeMillis() - lastRun) > DiskThresholdDecider.this.rerouteInterval.millis()) {
-                        lastRun = System.currentTimeMillis();
+                    if ((System.nanoTime() - lastRunNS) > DiskThresholdDecider.this.rerouteInterval.nanos()) {
+                        lastRunNS = System.nanoTime();
                        reroute = true;
                    } else {
                        logger.debug("high disk watermark exceeded on {} but an automatic reroute has occurred in the last [{}], skipping reroute",
@@ -334,19 +334,19 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe


    static abstract class TimedPrioritizedRunnable extends PrioritizedRunnable {
-        private final long creationTime;
+        private final long creationTimeNS;
        protected final String source;

        protected TimedPrioritizedRunnable(Priority priority, String source) {
            super(priority);
            this.source = source;
-            this.creationTime = System.currentTimeMillis();
+            this.creationTimeNS = System.nanoTime();
        }

        public long timeSinceCreatedInMillis() {
            // max with 0 to make sure we always return a non negative number
            // even if time shifts.
-            return Math.max(0, System.currentTimeMillis() - creationTime);
+            return Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - creationTimeNS));
        }

        public String source() {
@@ -378,11 +378,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                return;
            }
            ClusterState newClusterState;
-            long startTime = System.currentTimeMillis();
+            long startTimeNS = System.nanoTime();
            try {
                newClusterState = updateTask.execute(previousClusterState);
            } catch (Throwable e) {
-                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
                if (logger.isTraceEnabled()) {
                    StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
                    sb.append(previousClusterState.nodes().prettyPrint());
@@ -403,7 +403,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                if (updateTask instanceof ProcessedClusterStateUpdateTask) {
                    ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
                }
-                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
                logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
                warnAboutSlowTaskIfNeeded(executionTime, source);
                return;
@@ -523,11 +523,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                    ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
                }

-                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
                logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.uuid());
                warnAboutSlowTaskIfNeeded(executionTime, source);
            } catch (Throwable t) {
-                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+                TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
                StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
                sb.append(newClusterState.nodes().prettyPrint());
                sb.append(newClusterState.routingTable().prettyPrint());
@@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit;
  * Simple stop watch, allowing for timing of a number of tasks,
  * exposing total running time and running time for each named task.
  * <p/>
- * <p>Conceals use of <code>System.currentTimeMillis()</code>, improving the
+ * <p>Conceals use of <code>System.nanoTime()</code>, improving the
  * readability of application code and reducing the likelihood of calculation errors.
  * <p/>
  * <p>Note that this object is not designed to be thread-safe and does not
@@ -58,7 +58,7 @@ public class StopWatch {
    /**
     * Start time of the current task
     */
-    private long startTimeMillis;
+    private long startTimeNS;

    /**
     * Is the stop watch currently running?
@@ -77,7 +77,7 @@ public class StopWatch {
    /**
     * Total running time
     */
-    private long totalTimeMillis;
+    private long totalTimeNS;

    /**
     * Construct a new stop watch. Does not start any task.
@@ -129,7 +129,7 @@ public class StopWatch {
        if (this.running) {
            throw new IllegalStateException("Can't start StopWatch: it's already running");
        }
-        this.startTimeMillis = System.currentTimeMillis();
+        this.startTimeNS = System.nanoTime();
        this.running = true;
        this.currentTaskName = taskName;
        return this;
@@ -146,9 +146,9 @@ public class StopWatch {
        if (!this.running) {
            throw new IllegalStateException("Can't stop StopWatch: it's not running");
        }
-        long lastTime = System.currentTimeMillis() - this.startTimeMillis;
-        this.totalTimeMillis += lastTime;
-        this.lastTaskInfo = new TaskInfo(this.currentTaskName, lastTime);
+        long lastTimeNS = System.nanoTime() - this.startTimeNS;
+        this.totalTimeNS += lastTimeNS;
+        this.lastTaskInfo = new TaskInfo(this.currentTaskName, TimeValue.nsecToMSec(lastTimeNS));
        if (this.keepTaskList) {
            this.taskList.add(lastTaskInfo);
        }
@@ -189,7 +189,7 @@ public class StopWatch {
     * Return the total time for all tasks.
     */
    public TimeValue totalTime() {
-        return new TimeValue(totalTimeMillis, TimeUnit.MILLISECONDS);
+        return new TimeValue(totalTimeNS, TimeUnit.NANOSECONDS);
    }

    /**
@@ -16,6 +16,8 @@

 package org.elasticsearch.common.inject.internal;

+import org.elasticsearch.common.unit.TimeValue;
+
 import java.util.logging.Logger;

 /**
@@ -26,17 +28,17 @@ import java.util.logging.Logger;
 public class Stopwatch {
    private static final Logger logger = Logger.getLogger(Stopwatch.class.getName());

-    private long start = System.currentTimeMillis();
+    private long startNS = System.nanoTime();

    /**
     * Resets and returns elapsed time in milliseconds.
     */
    public long reset() {
-        long now = System.currentTimeMillis();
+        long nowNS = System.nanoTime();
        try {
-            return now - start;
+            return TimeValue.nsecToMSec(nowNS - startNS);
        } finally {
-            start = now;
+            startNS = nowNS;
        }
    }

@@ -38,6 +38,9 @@ import java.util.concurrent.TimeUnit;
  */
 public class TimeValue implements Serializable, Streamable {

+    /** How many nano-seconds in one milli-second */
+    public static final long NSEC_PER_MSEC = 1000000;
+
    public static TimeValue timeValueNanos(long nanos) {
        return new TimeValue(nanos, TimeUnit.NANOSECONDS);
    }
@@ -296,4 +299,8 @@ public class TimeValue implements Serializable, Streamable {
        long normalized = timeUnit.toNanos(duration);
        return (int) (normalized ^ (normalized >>> 32));
    }
+
+    public static long nsecToMSec(long ns) {
+        return ns / NSEC_PER_MSEC;
+    }
 }
@@ -33,6 +33,7 @@ import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.ShardId;
@@ -384,11 +385,11 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
        logger.trace("locking all shards for index {} - [{}]", index, numShards);
        List<ShardLock> allLocks = new ArrayList<>(numShards);
        boolean success = false;
-        long startTime = System.currentTimeMillis();
+        long startTimeNS = System.nanoTime();
        try {
            for (int i = 0; i < numShards; i++) {
-                long timeoutLeft = Math.max(0, lockTimeoutMS - (System.currentTimeMillis() - startTime));
-                allLocks.add(shardLock(new ShardId(index, i), timeoutLeft));
+                long timeoutLeftMS = Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
+                allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
            }
            success = true;
        } finally {
@@ -82,9 +82,9 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
        try {
            ensureNoPre019State();
            pre20Upgrade();
-            long start = System.currentTimeMillis();
+            long startNS = System.nanoTime();
            metaStateService.loadFullState();
-            logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
+            logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS)));
        } catch (Exception e) {
            logger.error("failed to read local state, exiting...", e);
            throw e;
@@ -26,6 +26,7 @@ import org.apache.lucene.util.packed.PackedInts;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
@@ -43,7 +44,7 @@ public enum GlobalOrdinalsBuilder {
     */
    public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, Settings settings, CircuitBreakerService breakerService, ESLogger logger) throws IOException {
        assert indexReader.leaves().size() > 1;
-        long startTime = System.currentTimeMillis();
+        long startTimeNS = System.nanoTime();

        final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
        final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
@@ -60,7 +61,7 @@ public enum GlobalOrdinalsBuilder {
                    "Global-ordinals[{}][{}] took {} ms",
                    indexFieldData.getFieldNames().fullName(),
                    ordinalMap.getValueCount(),
-                    (System.currentTimeMillis() - startTime)
+                    TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)
            );
        }
        return new InternalGlobalOrdinalsIndexFieldData(indexFieldData.index(), settings, indexFieldData.getFieldNames(),
@@ -26,6 +26,7 @@ import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
 import org.elasticsearch.index.settings.IndexSettings;
@@ -259,33 +260,33 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
        public final CounterMetric noopUpdates = new CounterMetric();
        public final CounterMetric throttleTimeMillisMetric = new CounterMetric();
        volatile boolean isThrottled = false;
-        volatile long startOfThrottleMillis;
+        volatile long startOfThrottleNS;

        public IndexingStats.Stats stats() {
-            long currentThrottleMillis = 0;
-            if (isThrottled && startOfThrottleMillis != 0) {
-                currentThrottleMillis += System.currentTimeMillis() - startOfThrottleMillis;
-                if (currentThrottleMillis < 0) {
-                    //Timeslip must have happened, have to ignore this value
-                    currentThrottleMillis = 0;
+            long currentThrottleNS = 0;
+            if (isThrottled && startOfThrottleNS != 0) {
+                currentThrottleNS += System.nanoTime() - startOfThrottleNS;
+                if (currentThrottleNS < 0) {
+                    // Paranoia (System.nanoTime() is supposed to be monotonic): time slip must have happened, have to ignore this value
+                    currentThrottleNS = 0;
                }
            }
            return new IndexingStats.Stats(
                    indexMetric.count(), TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()), indexCurrent.count(),
                    deleteMetric.count(), TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()), deleteCurrent.count(),
-                    noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(throttleTimeMillisMetric.count() + currentThrottleMillis));
+                    noopUpdates.count(), isThrottled, TimeUnit.MILLISECONDS.toMillis(throttleTimeMillisMetric.count() + TimeValue.nsecToMSec(currentThrottleNS)));
        }


        void setThrottled(boolean isThrottled) {
            if (!this.isThrottled && isThrottled) {
-                startOfThrottleMillis = System.currentTimeMillis();
+                startOfThrottleNS = System.nanoTime();
            } else if (this.isThrottled && !isThrottled) {
-                assert startOfThrottleMillis > 0 : "Bad state of startOfThrottleMillis";
-                long throttleTimeMillis = System.currentTimeMillis() - startOfThrottleMillis;
-                if (throttleTimeMillis >= 0) {
-                    //A timeslip may have occurred but never want to add a negative number
-                    throttleTimeMillisMetric.inc(throttleTimeMillis);
+                assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
+                long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
+                if (throttleTimeNS >= 0) {
+                    // Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number
+                    throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
                }
            }
            this.isThrottled = isThrottled;
@@ -1149,7 +1149,7 @@ public class IndexShard extends AbstractIndexShardComponent {
    }

    private void doCheckIndex() throws IndexShardException, IOException {
-        long time = System.currentTimeMillis();
+        long timeNS = System.nanoTime();
        if (!Lucene.indexExists(store.directory())) {
            return;
        }
@@ -1208,7 +1208,7 @@ public class IndexShard extends AbstractIndexShardComponent {
            logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
        }

-        recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, System.currentTimeMillis() - time));
+        recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS)));
    }

    public Engine engine() {
@@ -1078,10 +1078,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
        }

        synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
-            long nextVersion = System.currentTimeMillis();
-            while (nextVersion <= lastVersion) {
-                nextVersion = System.currentTimeMillis();
-            }
+            // Make sure if clock goes backwards we still move version forwards:
+            long nextVersion = Math.max(lastVersion+1, System.currentTimeMillis());
            final String checksumName = CHECKSUMS_PREFIX + nextVersion;
            try (IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT)) {
                output.writeInt(0); // version
@@ -697,7 +697,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
     */
    public void processPendingDeletes(Index index, @IndexSettings Settings indexSettings, TimeValue timeout) throws IOException {
        logger.debug("{} processing pending deletes", index);
-        final long startTime = System.currentTimeMillis();
+        final long startTimeNS = System.nanoTime();
        final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis());
        try {
            Map<ShardId, ShardLock> locks = new HashMap<>();
@@ -756,7 +756,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
                            return;
                        }
                    }
-                } while ((System.currentTimeMillis() - startTime) < timeout.millis());
+                } while ((System.nanoTime() - startTimeNS) < timeout.nanos());
            }
        } finally {
            IOUtils.close(shardLocks);
@@ -312,7 +312,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL

        @Override
        public void run() {
-            long startTime = System.currentTimeMillis();
+            long startTimeNS = System.nanoTime();
            if (logger.isTraceEnabled()) {
                logger.trace("running periodic field data cache cleanup");
            }
@@ -322,7 +322,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
                logger.warn("Exception during periodic field data cache cleanup:", e);
            }
            if (logger.isTraceEnabled()) {
-                logger.trace("periodic field data cache cleanup finished in {} milliseconds", System.currentTimeMillis() - startTime);
+                logger.trace("periodic field data cache cleanup finished in {} milliseconds", TimeValue.nsecToMSec(System.nanoTime() - startTimeNS));
            }
            // Reschedule itself to run again if not closed
            if (closed == false) {
@@ -386,13 +386,15 @@ public class RecoveryState implements ToXContent, Streamable {

        public synchronized void start() {
            assert startTime == 0 : "already started";
-            startTime = System.currentTimeMillis();
+            startTime = TimeValue.nsecToMSec(System.nanoTime());
        }

+        /** Returns start time in millis */
        public synchronized long startTime() {
            return startTime;
        }

+        /** Returns elapsed time in millis, or 0 if timer was not started */
        public synchronized long time() {
            if (startTime == 0) {
                return 0;
@@ -400,16 +402,17 @@ public class RecoveryState implements ToXContent, Streamable {
            if (time >= 0) {
                return time;
            }
-            return Math.max(0, System.currentTimeMillis() - startTime);
+            return Math.max(0, TimeValue.nsecToMSec(System.nanoTime()) - startTime);
        }

+        /** Returns stop time in millis */
        public synchronized long stopTime() {
            return stopTime;
        }

        public synchronized void stop() {
            assert stopTime == 0 : "already stopped";
-            stopTime = Math.max(System.currentTimeMillis(), startTime);
+            stopTime = Math.max(TimeValue.nsecToMSec(System.nanoTime()), startTime);
            time = stopTime - startTime;
            assert time >= 0;
        }
@@ -55,6 +55,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReferenceArray;

 /**
@@ -133,7 +134,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio

    private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
        logger.trace("listing store meta data for {}", shardId);
-        long startTime = System.currentTimeMillis();
+        long startTimeNS = System.nanoTime();
        boolean exists = false;
        try {
            IndexService indexService = indicesService.indexService(shardId.index().name());
@@ -165,7 +166,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
            }
            return new StoreFilesMetaData(false, shardId, Store.readMetadataSnapshot(shardPath.resolveIndex(), logger).asMap());
        } finally {
-            TimeValue took = new TimeValue(System.currentTimeMillis() - startTime);
+            TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS);
            if (exists) {
                logger.debug("{} loaded store meta data (took [{}])", shardId, took);
            } else {
@@ -81,7 +81,7 @@ public class PluginsService extends AbstractComponent {
    private PluginsInfo cachedPluginsInfo;
    private final TimeValue refreshInterval;
    private final boolean checkLucene;
-    private long lastRefresh;
+    private long lastRefreshNS;

    static class OnModuleReference {
        public final Class<? extends Module> moduleClass;
@@ -313,13 +313,13 @@ public class PluginsService extends AbstractComponent {
    synchronized public PluginsInfo info() {
        if (refreshInterval.millis() != 0) {
            if (cachedPluginsInfo != null &&
-                    (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
+                    (refreshInterval.millis() < 0 || (System.nanoTime() - lastRefreshNS) < refreshInterval.nanos())) {
                if (logger.isTraceEnabled()) {
                    logger.trace("using cache to retrieve plugins info");
                }
                return cachedPluginsInfo;
            }
-            lastRefresh = System.currentTimeMillis();
+            lastRefreshNS = System.nanoTime();
        }

        if (logger.isTraceEnabled()) {