Merge branch 'master' into docs/add_console_to_search_request_options
commit bf471c6950
@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
 <% commands.each {command -> %><%= command %><% } %>

@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
 <% commands.each {command -> %><%= command %><% } %>

@@ -69,6 +69,8 @@ public class Version {
     public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_2_ID = 2030299;
     public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_3_3_ID = 2030399;
+    public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -94,6 +96,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_3_3_ID:
+                return V_2_3_3;
             case V_2_3_2_ID:
                 return V_2_3_2;
             case V_2_3_1_ID:

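Aside (illustration, not part of the diff): the version IDs above follow Elasticsearch's usual packing of major/minor/revision into decimal digit groups, with a trailing two-digit slot that is 99 for GA releases and 1..n for alphaN pre-releases. A hypothetical helper makes the encoding explicit:

    // Hypothetical sketch of the ID scheme implied by the constants above.
    static int versionId(int major, int minor, int revision, int build) {
        // build: 99 for a GA release, 1..n for alpha/beta pre-releases
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
    // versionId(2, 3, 3, 99) == 2030399  (V_2_3_3_ID)
    // versionId(5, 0, 0, 1)  == 5000001  (V_5_0_0_alpha1_ID)
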
@@ -54,7 +54,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
                                                  TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
                                                  IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);
+                ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
         this.indicesService = indicesService;
     }
 

@@ -84,6 +84,20 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 
     final String transportNodeBroadcastAction;
 
+    public TransportBroadcastByNodeAction(
+            Settings settings,
+            String actionName,
+            ThreadPool threadPool,
+            ClusterService clusterService,
+            TransportService transportService,
+            ActionFilters actionFilters,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            Supplier<Request> request,
+            String executor) {
+        this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request,
+                executor, true);
+    }
+
     public TransportBroadcastByNodeAction(
             Settings settings,
             String actionName,
@@ -93,7 +107,8 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
             ActionFilters actionFilters,
             IndexNameExpressionResolver indexNameExpressionResolver,
             Supplier<Request> request,
-            String executor) {
+            String executor,
+            boolean canTripCircuitBreaker) {
         super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
 
         this.clusterService = clusterService;
@@ -101,7 +116,8 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 
         transportNodeBroadcastAction = actionName + "[n]";
 
-        transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, new BroadcastByNodeTransportRequestHandler());
+        transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, false, canTripCircuitBreaker,
+                new BroadcastByNodeTransportRequestHandler());
     }
 
     private Response newResponse(

@@ -53,6 +53,15 @@ public enum DateTimeUnit {
         return field;
     }
 
+    /**
+     * @param unit the {@link DateTimeUnit} to check
+     * @return true if the unit is a day or longer
+     */
+    public static boolean isDayOrLonger(DateTimeUnit unit) {
+        return (unit == DateTimeUnit.HOUR_OF_DAY || unit == DateTimeUnit.MINUTES_OF_HOUR
+                || unit == DateTimeUnit.SECOND_OF_MINUTE) == false;
+    }
+
     public static DateTimeUnit resolve(byte id) {
         switch (id) {
             case 1: return WEEK_OF_WEEKYEAR;

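Aside (illustration, not part of the diff): the new isDayOrLonger check gates the local-time conversion in the TimeZoneRounding change below. Units up to an hour have a fixed length in UTC, while calendar units stretch or shrink across DST transitions, so only the latter need the convert-to-local / add / convert-back round trip. A runnable sketch using the same Joda-Time calls (the sample date is hypothetical):

    import org.joda.time.DateTime;
    import org.joda.time.DateTimeZone;
    import org.joda.time.chrono.ISOChronology;

    public class DayOrLongerSketch {
        public static void main(String[] args) {
            DateTimeZone tz = DateTimeZone.forID("Europe/Berlin");
            // local midnight before the 2016 spring-forward; March 27 has only 23 hours
            long utc = new DateTime(2016, 3, 27, 0, 0, tz).getMillis();

            // day-or-longer unit: convert to local, add, convert back (the new code path)
            long local = tz.convertUTCToLocal(utc);
            long nextLocal = ISOChronology.getInstanceUTC().days().add(local, 1);
            long nextUtc = tz.convertLocalToUTC(nextLocal, false);
            System.out.println((nextUtc - utc) / 3_600_000); // 23, not 24

            // hour-or-shorter unit: plain UTC arithmetic, no conversion needed
            long nextHour = ISOChronology.getInstanceUTC().hours().add(utc, 1);
            System.out.println((nextHour - utc) / 3_600_000); // 1
        }
    }
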
@@ -46,8 +46,8 @@ public abstract class TimeZoneRounding extends Rounding {
 
     public static class Builder {
 
-        private DateTimeUnit unit;
-        private long interval = -1;
+        private final DateTimeUnit unit;
+        private final long interval;
 
         private DateTimeZone timeZone = DateTimeZone.UTC;
 
@@ -142,10 +142,15 @@ public abstract class TimeZoneRounding extends Rounding {
 
         @Override
         public long nextRoundingValue(long time) {
-            long timeLocal = time;
-            timeLocal = timeZone.convertUTCToLocal(time);
-            long nextInLocalTime = durationField.add(timeLocal, 1);
-            return timeZone.convertLocalToUTC(nextInLocalTime, false);
+            if (DateTimeUnit.isDayOrLonger(unit)) {
+                time = timeZone.convertUTCToLocal(time);
+            }
+            long next = durationField.add(time, 1);
+            if (DateTimeUnit.isDayOrLonger(unit)) {
+                return timeZone.convertLocalToUTC(next, false);
+            } else {
+                return next;
+            }
         }
 
         @Override
@@ -161,12 +166,12 @@ public abstract class TimeZoneRounding extends Rounding {
             out.writeByte(unit.id());
             out.writeString(timeZone.getID());
         }
-
+
         @Override
         public int hashCode() {
             return Objects.hash(unit, timeZone);
         }
-
+
         @Override
         public boolean equals(Object obj) {
             if (obj == null) {
@@ -236,12 +241,12 @@ public abstract class TimeZoneRounding extends Rounding {
             out.writeVLong(interval);
             out.writeString(timeZone.getID());
         }
-
+
         @Override
         public int hashCode() {
             return Objects.hash(interval, timeZone);
         }
-
+
         @Override
         public boolean equals(Object obj) {
             if (obj == null) {

@@ -397,6 +397,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
                     JvmGcMonitorService.ENABLED_SETTING,
                     JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
                     JvmGcMonitorService.GC_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_WARN_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_INFO_SETTING,
+                    JvmGcMonitorService.GC_OVERHEAD_DEBUG_SETTING,
                     PageCacheRecycler.LIMIT_HEAP_SETTING,
                     PageCacheRecycler.WEIGHT_BYTES_SETTING,
                     PageCacheRecycler.WEIGHT_INT_SETTING,

@@ -18,19 +18,6 @@
  */
 package org.elasticsearch.common.settings;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.function.BiConsumer;
-import java.util.function.Consumer;
-import java.util.function.Function;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.support.ToXContentToBytes;
@@ -50,6 +37,19 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
 /**
  * A setting. Encapsulates typical stuff like default value, parsing, and scope.
  * Some (SettingsProperty.Dynamic) can by modified at run time using the API.
@@ -504,7 +504,7 @@ public class Setting<T> extends ToXContentToBytes {
                 throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
             if (value > maxValue) {
-                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
             }
             return value;
         }
@@ -572,7 +572,7 @@ public class Setting<T> extends ToXContentToBytes {
                 throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
             if (value.bytes() > maxValue.bytes()) {
-                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
             }
             return value;
         }

@@ -1,629 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.util;
-
-import org.apache.lucene.store.DataInput;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.hash.MurmurHash3;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.unit.SizeValue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-
-/**
- * A bloom filter. Inspired by Guava bloom filter implementation though with some optimizations.
- */
-public class BloomFilter {
-
-    /**
-     * A factory that can use different fpp based on size.
-     */
-    public static class Factory {
-
-        public static final Factory DEFAULT = buildDefault();
-
-        private static Factory buildDefault() {
-            // Some numbers:
-            // 10k =0.001: 140.4kb , 10 Hashes
-            // 10k =0.01 : 93.6kb , 6 Hashes
-            // 100k=0.01 : 936.0kb , 6 Hashes
-            // 100k=0.03 : 712.7kb , 5 Hashes
-            // 500k=0.01 : 4.5mb , 6 Hashes
-            // 500k=0.03 : 3.4mb , 5 Hashes
-            // 500k=0.05 : 2.9mb , 4 Hashes
-            // 1m=0.01 : 9.1mb , 6 Hashes
-            // 1m=0.03 : 6.9mb , 5 Hashes
-            // 1m=0.05 : 5.9mb , 4 Hashes
-            // 5m=0.01 : 45.7mb , 6 Hashes
-            // 5m=0.03 : 34.8mb , 5 Hashes
-            // 5m=0.05 : 29.7mb , 4 Hashes
-            // 50m=0.01 : 457.0mb , 6 Hashes
-            // 50m=0.03 : 297.3mb , 4 Hashes
-            // 50m=0.10 : 228.5mb , 3 Hashes
-            return buildFromString("10k=0.01,1m=0.03");
-        }
-
-        /**
-         * Supports just passing fpp, as in "0.01", and also ranges, like "50k=0.01,1m=0.05". If
-         * its null, returns {@link #buildDefault()}.
-         */
-        public static Factory buildFromString(@Nullable String config) {
-            if (config == null) {
-                return buildDefault();
-            }
-            String[] sEntries = config.split(",");
-            if (sEntries.length == 0) {
-                if (config.length() > 0) {
-                    return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))});
-                }
-                return buildDefault();
-            }
-            Entry[] entries = new Entry[sEntries.length];
-            for (int i = 0; i < sEntries.length; i++) {
-                int index = sEntries[i].indexOf('=');
-                entries[i] = new Entry(
-                        (int) SizeValue.parseSizeValue(sEntries[i].substring(0, index).trim()).singles(),
-                        Double.parseDouble(sEntries[i].substring(index + 1).trim())
-                );
-            }
-            return new Factory(entries);
-        }
-
-        private final Entry[] entries;
-
-        public Factory(Entry[] entries) {
-            this.entries = entries;
-            // the order is from the upper most expected insertions to the lowest
-            Arrays.sort(this.entries, new Comparator<Entry>() {
-                @Override
-                public int compare(Entry o1, Entry o2) {
-                    return o2.expectedInsertions - o1.expectedInsertions;
-                }
-            });
-        }
-
-        public BloomFilter createFilter(int expectedInsertions) {
-            for (Entry entry : entries) {
-                if (expectedInsertions > entry.expectedInsertions) {
-                    return BloomFilter.create(expectedInsertions, entry.fpp);
-                }
-            }
-            return BloomFilter.create(expectedInsertions, 0.03);
-        }
-
-        public static class Entry {
-            public final int expectedInsertions;
-            public final double fpp;
-
-            Entry(int expectedInsertions, double fpp) {
-                this.expectedInsertions = expectedInsertions;
-                this.fpp = fpp;
-            }
-        }
-    }
-
-    /**
-     * Creates a bloom filter based on the with the expected number
-     * of insertions and expected false positive probability.
-     *
-     * @param expectedInsertions the number of expected insertions to the constructed
-     * @param fpp the desired false positive probability (must be positive and less than 1.0)
-     */
-    public static BloomFilter create(int expectedInsertions, double fpp) {
-        return create(expectedInsertions, fpp, -1);
-    }
-
-    /**
-     * Creates a bloom filter based on the expected number of insertions, expected false positive probability,
-     * and number of hash functions.
-     *
-     * @param expectedInsertions the number of expected insertions to the constructed
-     * @param fpp the desired false positive probability (must be positive and less than 1.0)
-     * @param numHashFunctions the number of hash functions to use (must be less than or equal to 255)
-     */
-    public static BloomFilter create(int expectedInsertions, double fpp, int numHashFunctions) {
-        if (expectedInsertions == 0) {
-            expectedInsertions = 1;
-        }
-        /*
-         * TODO(user): Put a warning in the javadoc about tiny fpp values,
-         * since the resulting size is proportional to -log(p), but there is not
-         * much of a point after all, e.g. optimalM(1000, 0.0000000000000001) = 76680
-         * which is less that 10kb. Who cares!
-         */
-        long numBits = optimalNumOfBits(expectedInsertions, fpp);
-
-        // calculate the optimal number of hash functions
-        if (numHashFunctions == -1) {
-            numHashFunctions = optimalNumOfHashFunctions(expectedInsertions, numBits);
-        }
-
-        try {
-            return new BloomFilter(new BitArray(numBits), numHashFunctions, Hashing.DEFAULT);
-        } catch (IllegalArgumentException e) {
-            throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e);
-        }
-    }
-
-    public static void skipBloom(IndexInput in) throws IOException {
-        int version = in.readInt(); // we do nothing with this now..., defaults to 0
-        final int numLongs = in.readInt();
-        in.seek(in.getFilePointer() + (numLongs * 8) + 4 + 4); // filter + numberOfHashFunctions + hashType
-    }
-
-    public static BloomFilter deserialize(DataInput in) throws IOException {
-        int version = in.readInt(); // we do nothing with this now..., defaults to 0
-        int numLongs = in.readInt();
-        long[] data = new long[numLongs];
-        for (int i = 0; i < numLongs; i++) {
-            data[i] = in.readLong();
-        }
-        int numberOfHashFunctions = in.readInt();
-        int hashType = in.readInt();
-        return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
-    }
-
-    public static void serilaize(BloomFilter filter, DataOutput out) throws IOException {
-        out.writeInt(0); // version
-        BitArray bits = filter.bits;
-        out.writeInt(bits.data.length);
-        for (long l : bits.data) {
-            out.writeLong(l);
-        }
-        out.writeInt(filter.numHashFunctions);
-        out.writeInt(filter.hashing.type()); // hashType
-    }
-
-    public static BloomFilter readFrom(StreamInput in) throws IOException {
-        int version = in.readVInt(); // we do nothing with this now..., defaults to 0
-        int numLongs = in.readVInt();
-        long[] data = new long[numLongs];
-        for (int i = 0; i < numLongs; i++) {
-            data[i] = in.readLong();
-        }
-        int numberOfHashFunctions = in.readVInt();
-        int hashType = in.readVInt(); // again, nothing to do now...
-        return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
-    }
-
-    public static void writeTo(BloomFilter filter, StreamOutput out) throws IOException {
-        out.writeVInt(0); // version
-        BitArray bits = filter.bits;
-        out.writeVInt(bits.data.length);
-        for (long l : bits.data) {
-            out.writeLong(l);
-        }
-        out.writeVInt(filter.numHashFunctions);
-        out.writeVInt(filter.hashing.type()); // hashType
-    }
-
-    /**
-     * The bit set of the BloomFilter (not necessarily power of 2!)
-     */
-    final BitArray bits;
-    /**
-     * Number of hashes per element
-     */
-    final int numHashFunctions;
-
-    final Hashing hashing;
-
-    BloomFilter(BitArray bits, int numHashFunctions, Hashing hashing) {
-        this.bits = bits;
-        this.numHashFunctions = numHashFunctions;
-        this.hashing = hashing;
-        /*
-         * This only exists to forbid BFs that cannot use the compact persistent representation.
-         * If it ever throws, at a user who was not intending to use that representation, we should
-         * reconsider
-         */
-        if (numHashFunctions > 255) {
-            throw new IllegalArgumentException("Currently we don't allow BloomFilters that would use more than 255 hash functions");
-        }
-    }
-
-    public boolean put(BytesRef value) {
-        return hashing.put(value, numHashFunctions, bits);
-    }
-
-    public boolean mightContain(BytesRef value) {
-        return hashing.mightContain(value, numHashFunctions, bits);
-    }
-
-    public int getNumHashFunctions() {
-        return this.numHashFunctions;
-    }
-
-    public long getSizeInBytes() {
-        return bits.ramBytesUsed();
-    }
-
-    @Override
-    public int hashCode() {
-        return bits.hashCode() + numHashFunctions;
-    }
-
-    /*
-     * Cheat sheet:
-     *
-     * m: total bits
-     * n: expected insertions
-     * b: m/n, bits per insertion
-
-     * p: expected false positive probability
-     *
-     * 1) Optimal k = b * ln2
-     * 2) p = (1 - e ^ (-kn/m))^k
-     * 3) For optimal k: p = 2 ^ (-k) ~= 0.6185^b
-     * 4) For optimal k: m = -nlnp / ((ln2) ^ 2)
-     */
-
-    /**
-     * Computes the optimal k (number of hashes per element inserted in Bloom filter), given the
-     * expected insertions and total number of bits in the Bloom filter.
-     * <p>
-     * See http://en.wikipedia.org/wiki/File:Bloom_filter_fp_probability.svg for the formula.
-     *
-     * @param n expected insertions (must be positive)
-     * @param m total number of bits in Bloom filter (must be positive)
-     */
-    static int optimalNumOfHashFunctions(long n, long m) {
-        return Math.max(1, (int) Math.round(m / n * Math.log(2)));
-    }
-
-    /**
-     * Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
-     * expected insertions, the required false positive probability.
-     * <p>
-     * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
-     *
-     * @param n expected insertions (must be positive)
-     * @param p false positive rate (must be 0 < p < 1)
-     */
-    static long optimalNumOfBits(long n, double p) {
-        if (p == 0) {
-            p = Double.MIN_VALUE;
-        }
-        return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
-    }
-
-    // Note: We use this instead of java.util.BitSet because we need access to the long[] data field
-    static final class BitArray {
-        final long[] data;
-        final long bitSize;
-        long bitCount;
-
-        BitArray(long bits) {
-            this(new long[size(bits)]);
-        }
-
-        private static int size(long bits) {
-            long quotient = bits / 64;
-            long remainder = bits - quotient * 64;
-            return Math.toIntExact(remainder == 0 ? quotient : 1 + quotient);
-        }
-
-        // Used by serialization
-        BitArray(long[] data) {
-            this.data = data;
-            long bitCount = 0;
-            for (long value : data) {
-                bitCount += Long.bitCount(value);
-            }
-            this.bitCount = bitCount;
-            this.bitSize = data.length * Long.SIZE;
-        }
-
-        /** Returns true if the bit changed value. */
-        boolean set(long index) {
-            if (!get(index)) {
-                data[(int) (index >>> 6)] |= (1L << index);
-                bitCount++;
-                return true;
-            }
-            return false;
-        }
-
-        boolean get(long index) {
-            return (data[(int) (index >>> 6)] & (1L << index)) != 0;
-        }
-
-        /** Number of bits */
-        long bitSize() {
-            return bitSize;
-        }
-
-        /** Number of set bits (1s) */
-        long bitCount() {
-            return bitCount;
-        }
-
-        BitArray copy() {
-            return new BitArray(data.clone());
-        }
-
-        /** Combines the two BitArrays using bitwise OR. */
-        void putAll(BitArray array) {
-            bitCount = 0;
-            for (int i = 0; i < data.length; i++) {
-                data[i] |= array.data[i];
-                bitCount += Long.bitCount(data[i]);
-            }
-        }
-
-        @Override public boolean equals(Object o) {
-            if (o instanceof BitArray) {
-                BitArray bitArray = (BitArray) o;
-                return Arrays.equals(data, bitArray.data);
-            }
-            return false;
-        }
-
-        @Override public int hashCode() {
-            return Arrays.hashCode(data);
-        }
-
-        public long ramBytesUsed() {
-            return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
-        }
-    }
-
-    static enum Hashing {
-
-        V0() {
-            @Override
-            protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
-                int hash1 = (int) hash64;
-                int hash2 = (int) (hash64 >>> 32);
-                boolean bitsChanged = false;
-                for (int i = 1; i <= numHashFunctions; i++) {
-                    int nextHash = hash1 + i * hash2;
-                    if (nextHash < 0) {
-                        nextHash = ~nextHash;
-                    }
-                    bitsChanged |= bits.set(nextHash % bitSize);
-                }
-                return bitsChanged;
-            }
-
-            @Override
-            protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
-                int hash1 = (int) hash64;
-                int hash2 = (int) (hash64 >>> 32);
-                for (int i = 1; i <= numHashFunctions; i++) {
-                    int nextHash = hash1 + i * hash2;
-                    if (nextHash < 0) {
-                        nextHash = ~nextHash;
-                    }
-                    if (!bits.get(nextHash % bitSize)) {
-                        return false;
-                    }
-                }
-                return true;
-            }
-
-            @Override
-            protected int type() {
-                return 0;
-            }
-        },
-        V1() {
-            @Override
-            protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());
-
-                boolean bitsChanged = false;
-                long combinedHash = hash128.h1;
-                for (int i = 0; i < numHashFunctions; i++) {
-                    // Make the combined hash positive and indexable
-                    bitsChanged |= bits.set((combinedHash & Long.MAX_VALUE) % bitSize);
-                    combinedHash += hash128.h2;
-                }
-                return bitsChanged;
-            }
-
-            @Override
-            protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
-                long bitSize = bits.bitSize();
-                MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());
-
-                long combinedHash = hash128.h1;
-                for (int i = 0; i < numHashFunctions; i++) {
-                    // Make the combined hash positive and indexable
-                    if (!bits.get((combinedHash & Long.MAX_VALUE) % bitSize)) {
-                        return false;
-                    }
-                    combinedHash += hash128.h2;
-                }
-                return true;
-            }
-
-            @Override
-            protected int type() {
-                return 1;
-            }
-        }
-        ;
-
-        protected abstract boolean put(BytesRef value, int numHashFunctions, BitArray bits);
-
-        protected abstract boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits);
-
-        protected abstract int type();
-
-        public static final Hashing DEFAULT = Hashing.V1;
-
-        public static Hashing fromType(int type) {
-            if (type == 0) {
-                return Hashing.V0;
-            } if (type == 1) {
-                return Hashing.V1;
-            } else {
-                throw new IllegalArgumentException("no hashing type matching " + type);
-            }
-        }
-    }
-
-    // START : MURMUR 3_128 USED FOR Hashing.V0
-    // NOTE: don't replace this code with the o.e.common.hashing.MurmurHash3 method which returns a different hash
-
-    protected static long getblock(byte[] key, int offset, int index) {
-        int i_8 = index << 3;
-        int blockOffset = offset + i_8;
-        return ((long) key[blockOffset + 0] & 0xff) + (((long) key[blockOffset + 1] & 0xff) << 8) +
-                (((long) key[blockOffset + 2] & 0xff) << 16) + (((long) key[blockOffset + 3] & 0xff) << 24) +
-                (((long) key[blockOffset + 4] & 0xff) << 32) + (((long) key[blockOffset + 5] & 0xff) << 40) +
-                (((long) key[blockOffset + 6] & 0xff) << 48) + (((long) key[blockOffset + 7] & 0xff) << 56);
-    }
-
-    protected static long rotl64(long v, int n) {
-        return ((v << n) | (v >>> (64 - n)));
-    }
-
-    protected static long fmix(long k) {
-        k ^= k >>> 33;
-        k *= 0xff51afd7ed558ccdL;
-        k ^= k >>> 33;
-        k *= 0xc4ceb9fe1a85ec53L;
-        k ^= k >>> 33;
-
-        return k;
-    }
-
-    @SuppressWarnings("fallthrough") // Uses fallthrough to implement a well know hashing algorithm
-    public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
-        final int nblocks = length >> 4; // Process as 128-bit blocks.
-
-        long h1 = seed;
-        long h2 = seed;
-
-        long c1 = 0x87c37b91114253d5L;
-        long c2 = 0x4cf5ad432745937fL;
-
-        //----------
-        // body
-
-        for (int i = 0; i < nblocks; i++) {
-            long k1 = getblock(key, offset, i * 2 + 0);
-            long k2 = getblock(key, offset, i * 2 + 1);
-
-            k1 *= c1;
-            k1 = rotl64(k1, 31);
-            k1 *= c2;
-            h1 ^= k1;
-
-            h1 = rotl64(h1, 27);
-            h1 += h2;
-            h1 = h1 * 5 + 0x52dce729;
-
-            k2 *= c2;
-            k2 = rotl64(k2, 33);
-            k2 *= c1;
-            h2 ^= k2;
-
-            h2 = rotl64(h2, 31);
-            h2 += h1;
-            h2 = h2 * 5 + 0x38495ab5;
-        }
-
-        //----------
-        // tail
-
-        // Advance offset to the unprocessed tail of the data.
-        offset += nblocks * 16;
-
-        long k1 = 0;
-        long k2 = 0;
-
-        switch (length & 15) {
-            case 15:
-                k2 ^= ((long) key[offset + 14]) << 48;
-            case 14:
-                k2 ^= ((long) key[offset + 13]) << 40;
-            case 13:
-                k2 ^= ((long) key[offset + 12]) << 32;
-            case 12:
-                k2 ^= ((long) key[offset + 11]) << 24;
-            case 11:
-                k2 ^= ((long) key[offset + 10]) << 16;
-            case 10:
-                k2 ^= ((long) key[offset + 9]) << 8;
-            case 9:
-                k2 ^= ((long) key[offset + 8]) << 0;
-                k2 *= c2;
-                k2 = rotl64(k2, 33);
-                k2 *= c1;
-                h2 ^= k2;
-
-            case 8:
-                k1 ^= ((long) key[offset + 7]) << 56;
-            case 7:
-                k1 ^= ((long) key[offset + 6]) << 48;
-            case 6:
-                k1 ^= ((long) key[offset + 5]) << 40;
-            case 5:
-                k1 ^= ((long) key[offset + 4]) << 32;
-            case 4:
-                k1 ^= ((long) key[offset + 3]) << 24;
-            case 3:
-                k1 ^= ((long) key[offset + 2]) << 16;
-            case 2:
-                k1 ^= ((long) key[offset + 1]) << 8;
-            case 1:
-                k1 ^= (key[offset]);
-                k1 *= c1;
-                k1 = rotl64(k1, 31);
-                k1 *= c2;
-                h1 ^= k1;
-        }
-
-        //----------
-        // finalization
-
-        h1 ^= length;
-        h2 ^= length;
-
-        h1 += h2;
-        h2 += h1;
-
-        h1 = fmix(h1);
-        h2 = fmix(h2);
-
-        h1 += h2;
-        h2 += h1;
-
-        //return (new long[]{h1, h2});
-        // SAME AS GUAVA, they take the first long out of the 128bit
-        return h1;
-    }
-
-    // END: MURMUR 3_128
-}

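Aside (illustration, not part of the diff): the removed Hashing.V1 used a standard trick worth noting: rather than k independent hash functions, every bit index is derived from the two 64-bit halves of a single 128-bit MurmurHash3 (Kirsch-Mitzenmacher double hashing). A minimal standalone sketch of that derivation:

    // Minimal sketch of the double-hashing index derivation used by Hashing.V1.
    // h1/h2 are the two halves of the 128-bit hash; m is the bit-array size.
    static long[] bitIndexes(long h1, long h2, int k, long m) {
        long[] idx = new long[k];
        long combined = h1;
        for (int i = 0; i < k; i++) {
            idx[i] = (combined & Long.MAX_VALUE) % m; // mask clears the sign bit
            combined += h2;
        }
        return idx;
    }
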
@@ -396,12 +396,14 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
                 builder.endObject();
             }
             builder.endArray();
 
             builder.startObject("total");
             builder.field(OPERATIONS, totalOperations);
             builder.field(READ_OPERATIONS, totalReadOperations);
             builder.field(WRITE_OPERATIONS, totalWriteOperations);
+            builder.field(READ_KILOBYTES, totalReadKilobytes);
+            builder.field(WRITE_KILOBYTES, totalWriteKilobytes);
             builder.endObject();
         }
         return builder;
     }

@@ -31,6 +31,7 @@ import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.HashMap;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.ScheduledFuture;
@@ -45,6 +46,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
     private final boolean enabled;
     private final TimeValue interval;
     private final Map<String, GcThreshold> gcThresholds;
+    private final GcOverheadThreshold gcOverheadThreshold;
 
     private volatile ScheduledFuture scheduledFuture;
 
@@ -57,6 +59,27 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
     private static String GC_COLLECTOR_PREFIX = "monitor.jvm.gc.collector.";
     public final static Setting<Settings> GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, Property.NodeScope);
 
+    public final static Setting<Integer> GC_OVERHEAD_WARN_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.warn", 50, 0, 100, Property.NodeScope);
+    public final static Setting<Integer> GC_OVERHEAD_INFO_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.info", 25, 0, 100, Property.NodeScope);
+    public final static Setting<Integer> GC_OVERHEAD_DEBUG_SETTING =
+        Setting.intSetting("monitor.jvm.gc.overhead.debug", 10, 0, 100, Property.NodeScope);
+
+    static class GcOverheadThreshold {
+        final int warnThreshold;
+        final int infoThreshold;
+        final int debugThreshold;
+
+        public GcOverheadThreshold(final int warnThreshold, final int infoThreshold, final int debugThreshold) {
+            this.warnThreshold = warnThreshold;
+            this.infoThreshold = infoThreshold;
+            this.debugThreshold = debugThreshold;
+        }
+    }
+
+
+
     static class GcThreshold {
         public final String name;
         public final long warnThreshold;
@@ -102,7 +125,42 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
         gcThresholds.putIfAbsent("default", new GcThreshold("default", 10000, 5000, 2000));
         this.gcThresholds = unmodifiableMap(gcThresholds);
 
-        logger.debug("enabled [{}], interval [{}], gc_threshold [{}]", enabled, interval, this.gcThresholds);
+        if (GC_OVERHEAD_WARN_SETTING.get(settings) <= GC_OVERHEAD_INFO_SETTING.get(settings)) {
+            final String message =
+                String.format(
+                    Locale.ROOT,
+                    "[%s] must be greater than [%s] [%d] but was [%d]",
+                    GC_OVERHEAD_WARN_SETTING.getKey(),
+                    GC_OVERHEAD_INFO_SETTING.getKey(),
+                    GC_OVERHEAD_INFO_SETTING.get(settings),
+                    GC_OVERHEAD_WARN_SETTING.get(settings));
+            throw new IllegalArgumentException(message);
+        }
+        if (GC_OVERHEAD_INFO_SETTING.get(settings) <= GC_OVERHEAD_DEBUG_SETTING.get(settings)) {
+            final String message =
+                String.format(
+                    Locale.ROOT,
+                    "[%s] must be greater than [%s] [%d] but was [%d]",
+                    GC_OVERHEAD_INFO_SETTING.getKey(),
+                    GC_OVERHEAD_DEBUG_SETTING.getKey(),
+                    GC_OVERHEAD_DEBUG_SETTING.get(settings),
+                    GC_OVERHEAD_INFO_SETTING.get(settings));
+            throw new IllegalArgumentException(message);
+        }
+
+        this.gcOverheadThreshold = new GcOverheadThreshold(
+            GC_OVERHEAD_WARN_SETTING.get(settings),
+            GC_OVERHEAD_INFO_SETTING.get(settings),
+            GC_OVERHEAD_DEBUG_SETTING.get(settings));
+
+        logger.debug(
+            "enabled [{}], interval [{}], gc_threshold [{}], overhead [{}, {}, {}]",
+            this.enabled,
+            this.interval,
+            this.gcThresholds,
+            this.gcOverheadThreshold.warnThreshold,
+            this.gcOverheadThreshold.infoThreshold,
+            this.gcOverheadThreshold.debugThreshold);
     }
 
     private static TimeValue getValidThreshold(Settings settings, String key, String level) {
@@ -120,15 +178,12 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
         return GC_COLLECTOR_PREFIX + key + "." + level;
     }
 
-    private static final String LOG_MESSAGE =
-        "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
-
     @Override
     protected void doStart() {
         if (!enabled) {
             return;
         }
-        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds) {
+        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds, gcOverheadThreshold) {
             @Override
             void onMonitorFailure(Throwable t) {
                 logger.debug("failed to monitor", t);
@@ -138,9 +193,17 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
             void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
                 logSlowGc(logger, threshold, seq, slowGcEvent, JvmGcMonitorService::buildPools);
             }
+
+            @Override
+            void onGcOverhead(final Threshold threshold, final long current, final long elapsed, final long seq) {
+                logGcOverhead(logger, threshold, current, elapsed, seq);
+            }
         }, interval);
     }
 
+    private static final String SLOW_GC_LOG_MESSAGE =
+        "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
+
     static void logSlowGc(
         final ESLogger logger,
         final JvmMonitor.Threshold threshold,
@@ -162,7 +225,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
             case WARN:
                 if (logger.isWarnEnabled()) {
                     logger.warn(
-                        LOG_MESSAGE,
+                        SLOW_GC_LOG_MESSAGE,
                         name,
                         seq,
                         totalGcCollectionCount,
@@ -180,7 +243,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
             case INFO:
                 if (logger.isInfoEnabled()) {
                     logger.info(
-                        LOG_MESSAGE,
+                        SLOW_GC_LOG_MESSAGE,
                        name,
                         seq,
                         totalGcCollectionCount,
@@ -198,7 +261,7 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
             case DEBUG:
                 if (logger.isDebugEnabled()) {
                     logger.debug(
-                        LOG_MESSAGE,
+                        SLOW_GC_LOG_MESSAGE,
                         name,
                         seq,
                         totalGcCollectionCount,
@@ -239,6 +302,33 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
         return sb.toString();
     }
 
+    private static final String OVERHEAD_LOG_MESSAGE = "[gc][{}] overhead, spent [{}] collecting in the last [{}]";
+
+    static void logGcOverhead(
+        final ESLogger logger,
+        final JvmMonitor.Threshold threshold,
+        final long current,
+        final long elapsed,
+        final long seq) {
+        switch (threshold) {
+            case WARN:
+                if (logger.isWarnEnabled()) {
+                    logger.warn(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
+                }
+                break;
+            case INFO:
+                if (logger.isInfoEnabled()) {
+                    logger.info(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
+                }
+                break;
+            case DEBUG:
+                if (logger.isDebugEnabled()) {
+                    logger.debug(OVERHEAD_LOG_MESSAGE, seq, TimeValue.timeValueMillis(current), TimeValue.timeValueMillis(elapsed));
+                }
+                break;
+        }
+    }
+
     @Override
     protected void doStop() {
         if (!enabled) {
@@ -287,16 +377,18 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
         private long lastTime = now();
         private JvmStats lastJvmStats = jvmStats();
         private long seq = 0;
-        private final Map<String, GcThreshold> gcThresholds;
+        private final Map<String, JvmGcMonitorService.GcThreshold> gcThresholds;
+        final GcOverheadThreshold gcOverheadThreshold;
 
-        public JvmMonitor(Map<String, GcThreshold> gcThresholds) {
+        public JvmMonitor(final Map<String, GcThreshold> gcThresholds, final GcOverheadThreshold gcOverheadThreshold) {
             this.gcThresholds = Objects.requireNonNull(gcThresholds);
+            this.gcOverheadThreshold = Objects.requireNonNull(gcOverheadThreshold);
         }
 
         @Override
         public void run() {
             try {
-                monitorLongGc();
+                monitorGc();
             } catch (Throwable t) {
                 onMonitorFailure(t);
             }
@@ -304,12 +396,21 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
 
         abstract void onMonitorFailure(Throwable t);
 
-        synchronized void monitorLongGc() {
+        synchronized void monitorGc() {
             seq++;
             final long currentTime = now();
             JvmStats currentJvmStats = jvmStats();
 
             final long elapsed = TimeUnit.NANOSECONDS.toMillis(currentTime - lastTime);
+
+            monitorSlowGc(currentJvmStats, elapsed);
+            monitorGcOverhead(currentJvmStats, elapsed);
+
+            lastTime = currentTime;
+            lastJvmStats = currentJvmStats;
+        }
+
+        final void monitorSlowGc(JvmStats currentJvmStats, long elapsed) {
             for (int i = 0; i < currentJvmStats.getGc().getCollectors().length; i++) {
                 GarbageCollector gc = currentJvmStats.getGc().getCollectors()[i];
                 GarbageCollector prevGc = lastJvmStats.getGc().getCollectors()[i];
@@ -350,8 +451,31 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
                         JvmInfo.jvmInfo().getMem().getHeapMax()));
                 }
             }
-            lastTime = currentTime;
-            lastJvmStats = currentJvmStats;
         }
+
+        final void monitorGcOverhead(final JvmStats currentJvmStats, final long elapsed) {
+            long current = 0;
+            for (int i = 0; i < currentJvmStats.getGc().getCollectors().length; i++) {
+                GarbageCollector gc = currentJvmStats.getGc().getCollectors()[i];
+                GarbageCollector prevGc = lastJvmStats.getGc().getCollectors()[i];
+                current += gc.getCollectionTime().millis() - prevGc.getCollectionTime().millis();
+            }
+            checkGcOverhead(current, elapsed, seq);
+        }
+
+        void checkGcOverhead(final long current, final long elapsed, final long seq) {
+            final int fraction = (int) ((100 * current) / (double) elapsed);
+            Threshold overheadThreshold = null;
+            if (fraction >= gcOverheadThreshold.warnThreshold) {
+                overheadThreshold = Threshold.WARN;
+            } else if (fraction >= gcOverheadThreshold.infoThreshold) {
+                overheadThreshold = Threshold.INFO;
+            } else if (fraction >= gcOverheadThreshold.debugThreshold) {
+                overheadThreshold = Threshold.DEBUG;
+            }
+            if (overheadThreshold != null) {
+                onGcOverhead(overheadThreshold, current, elapsed, seq);
+            }
+        }
 
         JvmStats jvmStats() {
@@ -364,6 +488,8 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
 
         abstract void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent);
 
+        abstract void onGcOverhead(final Threshold threshold, final long total, final long elapsed, final long seq);
+
     }
 
 }

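Aside (worked example with hypothetical numbers, not part of the diff): the overhead check added above computes the percentage of the monitoring window spent in GC and compares it against the warn/info/debug settings, whose defaults are 50/25/10:

    // If the collectors ran for 600ms of a 1000ms monitoring window:
    final long current = 600;
    final long elapsed = 1000;
    final int fraction = (int) ((100 * current) / (double) elapsed); // 60
    // 60 >= 50 (warn threshold), so onGcOverhead(Threshold.WARN, ...) fires.
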
@@ -211,7 +211,14 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             }
         }
         assert execution != null;
-        return execution.create(name, factories, valuesSource, config.format(), bucketCountThresholds, includeExclude, context, parent,
+
+        DocValueFormat format = config.format();
+        if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+            throw new AggregationExecutionException("Aggregation [" + name + "] cannot support regular expression style include/exclude "
+                    + "settings as they can only be applied to string fields. Use an array of values for include/exclude clauses");
+        }
+
+        return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, context, parent,
                 significanceHeuristic, this, pipelineAggregators, metaData);
     }
 
@@ -227,7 +234,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
         }
         IncludeExclude.LongFilter longFilter = null;
         if (includeExclude != null) {
-            longFilter = includeExclude.convertToLongFilter();
+            longFilter = includeExclude.convertToLongFilter(config.format());
         }
         return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),
                 bucketCountThresholds, context, parent, significanceHeuristic, this, longFilter, pipelineAggregators,
@@ -248,7 +255,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
             SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,
             Map<String, Object> metaData) throws IOException {
-            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
+            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format);
             return new SignificantStringTermsAggregator(name, factories, valuesSource, format, bucketCountThresholds, filter,
                     aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);
         }
@@ -262,7 +269,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
             SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,
             Map<String, Object> metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsSignificantTermsAggregator(name, factories,
                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter,
                     aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);
@@ -277,7 +284,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
             AggregationContext aggregationContext, Aggregator parent, SignificanceHeuristic significanceHeuristic,
             SignificantTermsAggregatorFactory termsAggregatorFactory, List<PipelineAggregator> pipelineAggregators,
             Map<String, Object> metaData) throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, format, bucketCountThresholds, filter,
                     aggregationContext, parent, significanceHeuristic, termsAggregatorFactory, pipelineAggregators, metaData);

@@ -150,8 +150,13 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
                 }
             }
         }
+        DocValueFormat format = config.format();
+        if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+            throw new AggregationExecutionException("Aggregation [" + name + "] cannot support regular expression style include/exclude "
+                    + "settings as they can only be applied to string fields. Use an array of values for include/exclude clauses");
+        }
 
-        return execution.create(name, factories, valuesSource, order, config.format(), bucketCountThresholds, includeExclude, context, parent,
+        return execution.create(name, factories, valuesSource, order, format, bucketCountThresholds, includeExclude, context, parent,
                 collectMode, showTermDocCountError, pipelineAggregators, metaData);
     }
 
@@ -171,7 +176,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
                     pipelineAggregators, metaData);
         }
         if (includeExclude != null) {
-            longFilter = includeExclude.convertToLongFilter();
+            longFilter = includeExclude.convertToLongFilter(config.format());
         }
         return new LongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), order,
                 bucketCountThresholds, context, parent, collectMode, showTermDocCountError, longFilter, pipelineAggregators,
@@ -192,7 +197,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
             AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
             boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
                     throws IOException {
-            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
+            final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format);
             return new StringTermsAggregator(name, factories, valuesSource, order, format, bucketCountThresholds, filter,
                     aggregationContext, parent, subAggCollectMode, showTermDocCountError, pipelineAggregators, metaData);
         }
@@ -211,7 +216,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
             AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
             boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
                     throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource, order,
                     format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,
                     pipelineAggregators, metaData);
@@ -231,7 +236,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
             AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
             boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
                     throws IOException {
-            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
+            final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
             return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource,
                     order, format, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError,
                     pipelineAggregators, metaData);

@@ -43,6 +43,7 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
 
@@ -135,7 +136,8 @@ public class IncludeExclude implements Writeable, ToXContent {
     }
 
     public static abstract class OrdinalsFilter {
-        public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException;
+        public abstract LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)
+                throws IOException;
 
     }
 
@@ -152,7 +154,8 @@ public class IncludeExclude implements Writeable, ToXContent {
          *
          */
        @Override
-        public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource) throws IOException {
+        public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, ValuesSource.Bytes.WithOrdinals valueSource)
+                throws IOException {
             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
             TermsEnum globalTermsEnum;
             Terms globalTerms = new DocValuesTerms(globalOrdinals);
@@ -179,7 +182,7 @@ public class IncludeExclude implements Writeable, ToXContent {
         @Override
         public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals, WithOrdinals valueSource) throws IOException {
             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
-            if(includeValues!=null){
+            if (includeValues != null) {
                 for (BytesRef term : includeValues) {
                     long ord = globalOrdinals.lookupTerm(term);
                     if (ord >= 0) {
@@ -534,33 +537,46 @@ public class IncludeExclude implements Writeable, ToXContent {
         return a;
     }
 
-    public StringFilter convertToStringFilter() {
+    public StringFilter convertToStringFilter(DocValueFormat format) {
         if (isRegexBased()) {
             return new AutomatonBackedStringFilter(toAutomaton());
         }
-        return new TermListBackedStringFilter(includeValues, excludeValues);
+        return new TermListBackedStringFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }
 
-    public OrdinalsFilter convertToOrdinalsFilter() {
+    private static SortedSet<BytesRef> parseForDocValues(SortedSet<BytesRef> endUserFormattedValues, DocValueFormat format) {
+        SortedSet<BytesRef> result = endUserFormattedValues;
+        if (endUserFormattedValues != null) {
+            if (format != DocValueFormat.RAW) {
+                result = new TreeSet<>();
+                for (BytesRef formattedVal : endUserFormattedValues) {
+                    result.add(format.parseBytesRef(formattedVal.utf8ToString()));
+                }
+            }
+        }
+        return result;
+    }
+
+    public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) {
 
         if (isRegexBased()) {
             return new AutomatonBackedOrdinalsFilter(toAutomaton());
         }
-        return new TermListBackedOrdinalsFilter(includeValues, excludeValues);
+        return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }
 
-    public LongFilter convertToLongFilter() {
+    public LongFilter convertToLongFilter(DocValueFormat format) {
         int numValids = includeValues == null ? 0 : includeValues.size();
         int numInvalids = excludeValues == null ? 0 : excludeValues.size();
         LongFilter result = new LongFilter(numValids, numInvalids);
         if (includeValues != null) {
             for (BytesRef val : includeValues) {
-                result.addAccept(Long.parseLong(val.utf8ToString()));
+                result.addAccept(format.parseLong(val.utf8ToString(), false, null));
             }
         }
         if (excludeValues != null) {
             for (BytesRef val : excludeValues) {
-                result.addReject(Long.parseLong(val.utf8ToString()));
+                result.addReject(format.parseLong(val.utf8ToString(), false, null));
             }
         }
         return result;
@@ -572,13 +588,13 @@ public class IncludeExclude implements Writeable, ToXContent {
         LongFilter result = new LongFilter(numValids, numInvalids);
         if (includeValues != null) {
             for (BytesRef val : includeValues) {
-                double dval=Double.parseDouble(val.utf8ToString());
+                double dval = Double.parseDouble(val.utf8ToString());
                 result.addAccept(NumericUtils.doubleToSortableLong(dval));
             }
         }
         if (excludeValues != null) {
             for (BytesRef val : excludeValues) {
-                double dval=Double.parseDouble(val.utf8ToString());
+                double dval = Double.parseDouble(val.utf8ToString());
                 result.addReject(NumericUtils.doubleToSortableLong(dval));
             }
         }

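Aside (illustration, not part of the diff): the common thread in the aggregator changes above is that include/exclude terms arrive in the field's display format, so they must be re-parsed with the field's DocValueFormat before they can match raw doc values. A JDK-only sketch of the idea behind parseForDocValues (the date value is a hypothetical stand-in):

    import java.util.SortedSet;
    import java.util.TreeSet;
    import java.util.function.UnaryOperator;

    public class ParseForDocValuesSketch {
        // Stand-in for DocValueFormat#parseBytesRef: maps a user-facing string to
        // the raw stored representation (identity for RAW; a date format would
        // convert "2016-05-01" to its epoch-based encoding).
        static SortedSet<String> parseForDocValues(SortedSet<String> formatted, UnaryOperator<String> parse) {
            SortedSet<String> raw = new TreeSet<>();
            for (String value : formatted) {
                raw.add(parse.apply(value));
            }
            return raw;
        }

        public static void main(String[] args) {
            SortedSet<String> include = new TreeSet<>();
            include.add("2016-05-01");
            System.out.println(parseForDocValues(include, v -> v)); // [2016-05-01]
        }
    }
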
@@ -1033,6 +1033,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     suggestBuilder = SuggestBuilder.fromXContent(context, suggesters);
                 } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
                     sorts = new ArrayList<>(SortBuilder.fromXContent(context));
+                } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
+                    rescoreBuilders = new ArrayList<>();
+                    rescoreBuilders.add(RescoreBuilder.parseFromXContent(context));
                 } else if (context.getParseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
                     XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                     ext = xContentBuilder.bytes();

@@ -0,0 +1,75 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.rounding;

import org.elasticsearch.test.ESTestCase;

import static org.elasticsearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR;
import static org.elasticsearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY;
import static org.elasticsearch.common.rounding.DateTimeUnit.QUARTER;
import static org.elasticsearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR;
import static org.elasticsearch.common.rounding.DateTimeUnit.DAY_OF_MONTH;
import static org.elasticsearch.common.rounding.DateTimeUnit.HOUR_OF_DAY;
import static org.elasticsearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR;
import static org.elasticsearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE;

public class DateTimeUnitTests extends ESTestCase {

    /**
     * test that we don't accidentally change enum ids
     */
    public void testEnumIds() {
        assertEquals(1, WEEK_OF_WEEKYEAR.id());
        assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1));

        assertEquals(2, YEAR_OF_CENTURY.id());
        assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2));

        assertEquals(3, QUARTER.id());
        assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3));

        assertEquals(4, MONTH_OF_YEAR.id());
        assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4));

        assertEquals(5, DAY_OF_MONTH.id());
        assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5));

        assertEquals(6, HOUR_OF_DAY.id());
        assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6));

        assertEquals(7, MINUTES_OF_HOUR.id());
        assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7));

        assertEquals(8, SECOND_OF_MINUTE.id());
        assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8));
    }

    public void testIsDayOrLonger() {
        for (DateTimeUnit unit : DateTimeUnit.values()) {
            if (DateTimeUnit.isDayOrLonger(unit)) {
                assertTrue(unit == DAY_OF_MONTH ||
                        unit == MONTH_OF_YEAR ||
                        unit == QUARTER ||
                        unit == YEAR_OF_CENTURY ||
                        unit == WEEK_OF_WEEKYEAR);
            }
        }
    }

}

@@ -25,6 +25,7 @@ import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.ISODateTimeFormat;

import java.util.ArrayList;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;

@@ -147,21 +148,37 @@ public class TimeZoneRoundingTests extends ESTestCase {
        Rounding tzRounding;
        // testing savings to non savings switch
        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))),
                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET"))));
        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2
                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))));
        assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))),
                equalTo(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))));
        assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))),
                equalTo(time("2014-10-26T03:00:00", DateTimeZone.forOffsetHours(2))));

        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build();
        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))),
                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET"))));
        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forOffsetHours(2))), // CEST = UTC+2
                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))));
        assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", DateTimeZone.forOffsetHours(2))),
                equalTo(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))));
        assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", DateTimeZone.forOffsetHours(2))),
                equalTo(time("2014-10-26T03:00:00", DateTimeZone.forOffsetHours(2))));

        // testing non savings to savings switch
        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))),
                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET"))));
        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forOffsetHours(1))), // CET = UTC+1
                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))));
        assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))),
                equalTo(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))));
        assertThat(tzRounding.nextRoundingValue(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))),
                equalTo(time("2014-03-30T03:00:00", DateTimeZone.forOffsetHours(1))));

        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build();
        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))),
                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET"))));
        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forOffsetHours(1))), // CET = UTC+1
                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))));
        assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", DateTimeZone.forOffsetHours(1))),
                equalTo(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))));
        assertThat(tzRounding.nextRoundingValue(time("2014-03-30T02:00:00", DateTimeZone.forOffsetHours(1))),
                equalTo(time("2014-03-30T03:00:00", DateTimeZone.forOffsetHours(1))));

        // testing non savings to savings switch (America/Chicago)
        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();

@@ -210,6 +227,31 @@ public class TimeZoneRoundingTests extends ESTestCase {
        }
    }

    /**
     * Test that nextRoundingValue() for hour rounding (and smaller) is equally spaced (see #18326)
     * Start at a random date in a random time zone, then find the next zone offset transition (if any).
     * From there, check that when we advance by using rounding#nextRoundingValue(), we always advance by the same
     * amount of milliseconds.
     */
    public void testSubHourNextRoundingEquallySpaced() {
        String timeZone = randomFrom(new ArrayList<>(DateTimeZone.getAvailableIDs()));
        DateTimeUnit unit = randomFrom(new DateTimeUnit[] { DateTimeUnit.HOUR_OF_DAY, DateTimeUnit.MINUTES_OF_HOUR,
                DateTimeUnit.SECOND_OF_MINUTE });
        DateTimeZone tz = DateTimeZone.forID(timeZone);
        TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(unit, tz);
        // move the random date to transition for timezones that have offset change due to dst transition
        long nextTransition = tz.nextTransition(Math.abs(randomLong() % ((long) 10e11)));
        final long millisPerUnit = unit.field().getDurationField().getUnitMillis();
        // start ten units before transition
        long roundedDate = rounding.round(nextTransition - (10 * millisPerUnit));
        while (roundedDate < nextTransition + 10 * millisPerUnit) {
            long delta = rounding.nextRoundingValue(roundedDate) - roundedDate;
            assertEquals("Difference between rounded values not equally spaced for [" + unit.name() + "], [" + timeZone + "] at "
                    + new DateTime(roundedDate), millisPerUnit, delta);
            roundedDate = rounding.nextRoundingValue(roundedDate);
        }
    }

    /**
     * randomized test on TimeIntervalRounding with random interval and time zone offsets
     */

@@ -430,7 +430,7 @@ public class SettingTests extends ESTestCase {
            integerSetting.get(Settings.builder().put("foo.bar", 11).build());
            fail();
        } catch (IllegalArgumentException ex) {
            assertEquals("Failed to parse value [11] for setting [foo.bar] must be =< 10", ex.getMessage());
            assertEquals("Failed to parse value [11] for setting [foo.bar] must be <= 10", ex.getMessage());
        }

        try {

@@ -75,6 +75,9 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
    /** Reset all breaker settings back to their defaults */
    private void reset() {
        logger.info("--> resetting breaker settings");
        // clear all caches, we could be very close (or even above) the limit and then we will not be able to reset the breaker settings
        client().admin().indices().prepareClearCache().setFieldDataCache(true).setQueryCache(true).setRequestCache(true).get();

        Settings resetSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
                HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null))

@@ -214,7 +217,6 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
     * Test that a breaker correctly redistributes to a different breaker, in
     * this case, the fielddata breaker borrows space from the request breaker
     */
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/18325")
    public void testParentChecking() throws Exception {
        if (noopBreakerUsed()) {
            logger.info("--> noop breakers used, skipping test");

@@ -274,9 +276,6 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
                cause.toString(), startsWith("CircuitBreakingException[[parent] Data too large"));
            assertThat("Exception: [" + cause.toString() + "] should contain a CircuitBreakingException",
                cause.toString(), endsWith(errMsg));
        } finally {
            // reset before teardown as it requires properly set up breakers
            reset();
        }
    }

@@ -79,6 +79,46 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase {
        }, true, null);
    }

    public void testIllegalOverheadSettings() throws InterruptedException {
        for (final String threshold : new String[] { "warn", "info", "debug" }) {
            final Settings.Builder builder = Settings.builder();
            builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(Integer.MIN_VALUE, -1));
            execute(builder.build(), (command, interval) -> null, t -> {
                assertThat(t, instanceOf(IllegalArgumentException.class));
                assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be >= 0"));
            }, true, null);
        }

        for (final String threshold : new String[] { "warn", "info", "debug" }) {
            final Settings.Builder builder = Settings.builder();
            builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(100 + 1, Integer.MAX_VALUE));
            execute(builder.build(), (command, interval) -> null, t -> {
                assertThat(t, instanceOf(IllegalArgumentException.class));
                assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be <= 100"));
            }, true, null);
        }

        final Settings.Builder infoWarnOutOfOrderBuilder = Settings.builder();
        final int info = randomIntBetween(2, 98);
        infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info);
        final int warn = randomIntBetween(1, info - 1);
        infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", warn);
        execute(infoWarnOutOfOrderBuilder.build(), (command, interval) -> null, t -> {
            assertThat(t, instanceOf(IllegalArgumentException.class));
            assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.warn] must be greater than [monitor.jvm.gc.overhead.info] [" + info + "] but was [" + warn + "]"));
        }, true, null);

        final Settings.Builder debugInfoOutOfOrderBuilder = Settings.builder();
        debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info);
        final int debug = randomIntBetween(info + 1, 99);
        debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.debug", debug);
        debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", randomIntBetween(debug + 1, 100)); // or the test will fail for the wrong reason
        execute(debugInfoOutOfOrderBuilder.build(), (command, interval) -> null, t -> {
            assertThat(t, instanceOf(IllegalArgumentException.class));
            assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.info] must be greater than [monitor.jvm.gc.overhead.debug] [" + debug + "] but was [" + info + "]"));
        }, true, null);
    }

    private static void execute(Settings settings, BiFunction<Runnable, TimeValue, ScheduledFuture<?>> scheduler, Runnable asserts) throws InterruptedException {
        execute(settings, scheduler, null, false, asserts);
    }

@@ -133,4 +133,43 @@ public class JvmGcMonitorServiceTests extends ESTestCase {
        verifyNoMoreInteractions(logger);
    }

    public void testGcOverheadLogging() {
        final JvmGcMonitorService.JvmMonitor.Threshold threshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
        final int current = randomIntBetween(1, Integer.MAX_VALUE);
        final long elapsed = randomIntBetween(current, Integer.MAX_VALUE);
        final long seq = randomIntBetween(1, Integer.MAX_VALUE);
        final ESLogger logger = mock(ESLogger.class);
        when(logger.isWarnEnabled()).thenReturn(true);
        when(logger.isInfoEnabled()).thenReturn(true);
        when(logger.isDebugEnabled()).thenReturn(true);
        JvmGcMonitorService.logGcOverhead(logger, threshold, current, elapsed, seq);
        switch(threshold) {
            case WARN:
                verify(logger).isWarnEnabled();
                verify(logger).warn(
                    "[gc][{}] overhead, spent [{}] collecting in the last [{}]",
                    seq,
                    TimeValue.timeValueMillis(current),
                    TimeValue.timeValueMillis(elapsed));
                break;
            case INFO:
                verify(logger).isInfoEnabled();
                verify(logger).info(
                    "[gc][{}] overhead, spent [{}] collecting in the last [{}]",
                    seq,
                    TimeValue.timeValueMillis(current),
                    TimeValue.timeValueMillis(elapsed));
                break;
            case DEBUG:
                verify(logger).isDebugEnabled();
                verify(logger).debug(
                    "[gc][{}] overhead, spent [{}] collecting in the last [{}]",
                    seq,
                    TimeValue.timeValueMillis(current),
                    TimeValue.timeValueMillis(elapsed));
                break;
        }
        verifyNoMoreInteractions(logger);
    }

}

@@ -41,10 +41,12 @@ import static org.mockito.Mockito.when;

public class JvmMonitorTests extends ESTestCase {

    private static final JvmGcMonitorService.GcOverheadThreshold IGNORE = new JvmGcMonitorService.GcOverheadThreshold(0, 0, 0);

    public void testMonitorFailure() {
        AtomicBoolean shouldFail = new AtomicBoolean();
        AtomicBoolean invoked = new AtomicBoolean();
        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap()) {
        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) {
            @Override
            void onMonitorFailure(Throwable t) {
                invoked.set(true);

@@ -53,7 +55,7 @@ public class JvmMonitorTests extends ESTestCase {
            }

            @Override
            synchronized void monitorLongGc() {
            synchronized void monitorGc() {
                if (shouldFail.get()) {
                    throw new RuntimeException("simulated");
                }

@@ -62,6 +64,10 @@ public class JvmMonitorTests extends ESTestCase {
            @Override
            void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
            }

            @Override
            void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) {
            }
        };

        monitor.run();

@@ -166,7 +172,7 @@ public class JvmMonitorTests extends ESTestCase {

        final AtomicInteger count = new AtomicInteger();

        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds) {
        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds, IGNORE) {
            @Override
            void onMonitorFailure(Throwable t) {
            }

@@ -198,6 +204,10 @@ public class JvmMonitorTests extends ESTestCase {
            }
        }

            @Override
            void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) {
            }

            @Override
            long now() {
                return now.get();

@@ -213,7 +223,7 @@ public class JvmMonitorTests extends ESTestCase {

        now.set(start + TimeUnit.NANOSECONDS.convert(expectedElapsed, TimeUnit.MILLISECONDS));
        jvmStats.set(monitorJvmStats);
        monitor.monitorLongGc();
        monitor.monitorGc();

        assertThat(count.get(), equalTo((youngGcThreshold ? 1 : 0) + (oldGcThreshold ? 1 : 0)));
    }

@@ -235,14 +245,140 @@ public class JvmMonitorTests extends ESTestCase {

    private JvmStats jvmStats(JvmStats.GarbageCollector youngCollector, JvmStats.GarbageCollector oldCollector) {
        final JvmStats jvmStats = mock(JvmStats.class);
        final JvmStats.GarbageCollectors initialGcs = mock(JvmStats.GarbageCollectors.class);
        final JvmStats.GarbageCollector[] initialCollectors = new JvmStats.GarbageCollector[2];
        initialCollectors[0] = youngCollector;
        initialCollectors[1] = oldCollector;
        when(initialGcs.getCollectors()).thenReturn(initialCollectors);
        when(jvmStats.getGc()).thenReturn(initialGcs);
        final JvmStats.GarbageCollectors gcs = mock(JvmStats.GarbageCollectors.class);
        final JvmStats.GarbageCollector[] collectors = new JvmStats.GarbageCollector[2];
        collectors[0] = youngCollector;
        collectors[1] = oldCollector;
        when(gcs.getCollectors()).thenReturn(collectors);
        when(jvmStats.getGc()).thenReturn(gcs);
        when(jvmStats.getMem()).thenReturn(JvmStats.jvmStats().getMem());
        return jvmStats;
    }

    public void testMonitorGc() {
        final int youngCollectionCount = randomIntBetween(1, 16);
        final int youngCollectionIncrement = randomIntBetween(1, 16);
        final int youngCollectionTime = randomIntBetween(1, 1 << 10);
        final int youngCollectionTimeIncrement = randomIntBetween(1, 1 << 10);
        final int oldCollectionCount = randomIntBetween(1, 16);
        final int oldCollectionIncrement = randomIntBetween(1, 16);
        final int oldCollectionTime = randomIntBetween(1, 1 << 10);
        final int oldCollectionTimeIncrement = randomIntBetween(1, 1 << 10);

        final JvmStats.GarbageCollector lastYoungCollector = collector("young", youngCollectionCount, youngCollectionTime);
        final JvmStats.GarbageCollector lastOldCollector = collector("old", oldCollectionCount, oldCollectionTime);
        final JvmStats lastjvmStats = jvmStats(lastYoungCollector, lastOldCollector);

        final JvmStats.GarbageCollector currentYoungCollector =
            collector("young", youngCollectionCount + youngCollectionIncrement, youngCollectionTime + youngCollectionTimeIncrement);
        final JvmStats.GarbageCollector currentOldCollector =
            collector("old", oldCollectionCount + oldCollectionIncrement, oldCollectionTime + oldCollectionTimeIncrement);
        final JvmStats currentJvmStats = jvmStats(currentYoungCollector, currentOldCollector);
        final long expectedElapsed =
            randomIntBetween(
                Math.max(youngCollectionTime + youngCollectionTimeIncrement, oldCollectionTime + oldCollectionTimeIncrement),
                Integer.MAX_VALUE);

        final AtomicBoolean invoked = new AtomicBoolean();

        final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) {

            @Override
            void onMonitorFailure(Throwable t) {
            }

            @Override
            void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) {
            }

            @Override
            void onGcOverhead(Threshold threshold, long total, long elapsed, long seq) {
            }

            @Override
            void checkGcOverhead(long current, long elapsed, long seq) {
                invoked.set(true);
                assertThat(current, equalTo((long)(youngCollectionTimeIncrement + oldCollectionTimeIncrement)));
                assertThat(elapsed, equalTo(expectedElapsed));
            }

            @Override
            JvmStats jvmStats() {
                return lastjvmStats;
            }
        };

        monitor.monitorGcOverhead(currentJvmStats, expectedElapsed);
        assertTrue(invoked.get());
    }

    private JvmStats.GarbageCollector collector(final String name, final int collectionCount, final int collectionTime) {
        final JvmStats.GarbageCollector gc = mock(JvmStats.GarbageCollector.class);
        when(gc.getName()).thenReturn(name);
        when(gc.getCollectionCount()).thenReturn((long)collectionCount);
        when(gc.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(collectionTime));
        return gc;
    }

    public void testCheckGcOverhead() {
        final int debugThreshold = randomIntBetween(1, 98);
        final int infoThreshold = randomIntBetween(debugThreshold + 1, 99);
        final int warnThreshold = randomIntBetween(infoThreshold + 1, 100);
        final JvmGcMonitorService.GcOverheadThreshold gcOverheadThreshold =
            new JvmGcMonitorService.GcOverheadThreshold(warnThreshold, infoThreshold, debugThreshold);

        final JvmGcMonitorService.JvmMonitor.Threshold expectedThreshold;
        int fraction = 0;
        final long expectedCurrent;
        final long expectedElapsed;
        if (randomBoolean()) {
            expectedThreshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
            switch (expectedThreshold) {
                case WARN:
                    fraction = randomIntBetween(warnThreshold, 100);
                    break;
                case INFO:
                    fraction = randomIntBetween(infoThreshold, warnThreshold - 1);
                    break;
                case DEBUG:
                    fraction = randomIntBetween(debugThreshold, infoThreshold - 1);
                    break;
            }
        } else {
            expectedThreshold = null;
            fraction = randomIntBetween(0, debugThreshold - 1);
        }

        expectedElapsed = 100 * randomIntBetween(1, 1000);
        expectedCurrent = fraction * expectedElapsed / 100;

        final AtomicBoolean invoked = new AtomicBoolean();
        final long expectedSeq = randomIntBetween(1, Integer.MAX_VALUE);

        final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), gcOverheadThreshold) {

            @Override
            void onMonitorFailure(final Throwable t) {
            }

            @Override
            void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) {
            }

            @Override
            void onGcOverhead(final Threshold threshold, final long current, final long elapsed, final long seq) {
                invoked.set(true);
                assertThat(threshold, equalTo(expectedThreshold));
                assertThat(current, equalTo(expectedCurrent));
                assertThat(elapsed, equalTo(expectedElapsed));
                assertThat(seq, equalTo(expectedSeq));
            }

        };

        monitor.checkGcOverhead(expectedCurrent, expectedElapsed, expectedSeq);

        assertThat(invoked.get(), equalTo(expectedThreshold != null));
    }

}

@@ -24,6 +24,7 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.query.MatchNoneQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;

@@ -1146,4 +1147,27 @@ public class DateHistogramIT extends ESIntegTestCase {
        Histogram histo = response.getAggregations().get("histo");
        assertThat(histo.getBuckets().size(), greaterThan(0));
    }

    /**
     * When DST ends, local time turns back one hour, so between 2am and 4am wall time we should have four buckets:
     * "2015-10-25T02:00:00.000+02:00",
     * "2015-10-25T02:00:00.000+01:00",
     * "2015-10-25T03:00:00.000+01:00",
     * "2015-10-25T04:00:00.000+01:00".
     */
    public void testDSTEndTransition() throws Exception {
        SearchResponse response = client().prepareSearch("idx")
                .setQuery(new MatchNoneQueryBuilder())
                .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("Europe/Oslo"))
                        .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds(
                                new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")))
                .execute().actionGet();

        Histogram histo = response.getAggregations().get("histo");
        List<? extends Bucket> buckets = histo.getBuckets();
        assertThat(buckets.size(), equalTo(4));
        assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L));
        assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L));
        assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L));
    }
}
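
For reference, a sketch of the REST form of the request that testDSTEndTransition builds above (not part of the diff; `1h` is the interval string behind DateHistogramInterval.HOUR, and the index/field names follow the test):

[source,js]
--------------------------------------------------
GET /idx/_search
{
  "size" : 0,
  "query" : { "match_none" : {} },
  "aggs" : {
    "histo" : {
      "date_histogram" : {
        "field" : "date",
        "interval" : "1h",
        "time_zone" : "Europe/Oslo",
        "min_doc_count" : 0,
        "extended_bounds" : {
          "min" : "2015-10-25T02:00:00.000+02:00",
          "max" : "2015-10-25T04:00:00.000+01:00"
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE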
@@ -72,6 +72,7 @@ import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.HighlightBuilderTests;
import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;

@@ -561,6 +562,57 @@ public class SearchSourceBuilderTests extends ESTestCase {
        }
    }

    /**
     * test that we can parse the `rescore` element either as single object or as array
     */
    public void testParseRescore() throws IOException {
        {
            String restContent = "{\n" +
                "    \"query\" : {\n" +
                "        \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" +
                "     },\n" +
                "    \"rescore\": {" +
                "        \"window_size\": 50,\n" +
                "        \"query\": {\n" +
                "            \"rescore_query\" : {\n" +
                "                \"match\": { \"content\": { \"query\": \"baz\" } }\n" +
                "            }\n" +
                "        }\n" +
                "    }\n" +
                "}\n";
            try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
                SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
                        aggParsers, suggesters);
                assertEquals(1, searchSourceBuilder.rescores().size());
                assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50),
                        searchSourceBuilder.rescores().get(0));
            }
        }

        {
            String restContent = "{\n" +
                "    \"query\" : {\n" +
                "        \"match\": { \"content\": { \"query\": \"foo bar\" }}\n" +
                "     },\n" +
                "    \"rescore\": [ {" +
                "        \"window_size\": 50,\n" +
                "        \"query\": {\n" +
                "            \"rescore_query\" : {\n" +
                "                \"match\": { \"content\": { \"query\": \"baz\" } }\n" +
                "            }\n" +
                "        }\n" +
                "    } ]\n" +
                "}\n";
            try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
                SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser),
                        aggParsers, suggesters);
                assertEquals(1, searchSourceBuilder.rescores().size());
                assertEquals(new QueryRescorerBuilder(QueryBuilders.matchQuery("content", "baz")).windowSize(50),
                        searchSourceBuilder.rescores().get(0));
            }
        }
    }

    public void testEmptyPostFilter() throws IOException {
        SearchSourceBuilder builder = new SearchSourceBuilder();
        builder.postFilter(new EmptyQueryBuilder());

Binary file not shown.
Binary file not shown.
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# check in case a user was using this mechanism
if [ "x$ES_CLASSPATH" != "x" ]; then

@@ -28,6 +28,8 @@ PUT /my_index
    }
  }
}

GET /_cluster/health?wait_for_status=yellow
------------------------------------------
// CONSOLE
// TESTSETUP

@@ -73,7 +75,7 @@ This query has two required parameters:

`id`:: The required parent id; selected documents must refer to it.

`ignore_unmapped`:: When set to `true` this will ignore an unmapped `type` and will not match any
documents for this query. This can be useful when querying multiple indexes
which might have different mappings. When set to `false` (the default value)
the query will throw an exception if the `type` is not mapped.
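
A minimal sketch of the parameter in use (not part of this change; the index, type, and id are hypothetical):

[source,js]
--------------------------------------------------
GET /my_index/_search
{
  "query" : {
    "parent_id" : {
      "type" : "my_child",
      "id" : "1",
      "ignore_unmapped" : true
    }
  }
}
--------------------------------------------------
// CONSOLE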
@@ -12,6 +12,7 @@ defaults to `10`.

[source,js]
--------------------------------------------------
GET /_search
{
    "from" : 0, "size" : 10,
    "query" : {

@@ -19,6 +20,8 @@ defaults to `10`.
    }
}
--------------------------------------------------
// CONSOLE


Note that `from` + `size` can not be more than the `index.max_result_window`
index setting which defaults to 10,000. See the <<search-request-scroll,Scroll>> or <<search-request-search-after,Search After>>
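
When deeper paging is genuinely required, the window can be raised per index; a sketch (not part of this change, and raising the window increases the memory cost of deep pages):

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index" : {
    "max_result_window" : 20000
  }
}
--------------------------------------------------
// CONSOLE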
@@ -1,3 +1,3 @@
#!/bin/sh
#!/bin/bash

echo test

@@ -316,7 +316,7 @@ public class RepositoryS3SettingsTests extends ESTestCase {
            "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb");
        // chunk > 5tb should fail
        internalTestInvalidChunkBufferSizeSettings(new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(6, ByteSizeUnit.TB),
            "Failed to parse value [6tb] for setting [chunk_size] must be =< 5tb");
            "Failed to parse value [6tb] for setting [chunk_size] must be <= 5tb");
    }

    private Settings buildSettings(Settings... global) {

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# This file contains some utilities to test the elasticsearch scripts,
# the .deb/.rpm packages and the SysV/Systemd scripts.

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# This file contains some utilities to test the elasticsearch scripts with
# the .deb/.rpm packages.

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# This file contains some utilities to test the elasticsearch scripts,
# the .deb/.rpm packages and the SysV/Systemd scripts.

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# This file contains some utilities to test the elasticsearch scripts,
# the .deb/.rpm packages and the SysV/Systemd scripts.

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash

# This file contains some utilities to test the elasticsearch scripts,
# the .deb/.rpm packages and the SysV/Systemd scripts.

@@ -1,9 +1,5 @@
---
"Discovery stats":
  - skip:
      version: "5.0.0 - "
      reason: Tracked in issue 18433

  - do:
      cluster.state: {}

@@ -117,6 +117,33 @@ setup:

  - match: { aggregations.ip_terms.buckets.1.doc_count: 1 }

  - do:
      search:
        body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "include" : [ "127.0.0.1" ] } } } }

  - match: { hits.total: 3 }

  - length: { aggregations.ip_terms.buckets: 1 }

  - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" }

  - do:
      search:
        body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : [ "127.0.0.1" ] } } } }

  - match: { hits.total: 3 }

  - length: { aggregations.ip_terms.buckets: 1 }

  - match: { aggregations.ip_terms.buckets.0.key: "::1" }

  - do:
      catch: request
      search:
        body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } }

---
"Boolean test":
  - do:

@@ -300,4 +327,27 @@ setup:
  - match: { aggregations.date_terms.buckets.1.key_as_string: "2014-09-01T00:00:00.000Z" }

  - match: { aggregations.date_terms.buckets.1.doc_count: 1 }

  - do:
      search:
        body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "include" : [ "2016-05-03" ] } } } }

  - match: { hits.total: 3 }

  - length: { aggregations.date_terms.buckets: 1 }

  - match: { aggregations.date_terms.buckets.0.key_as_string: "2016-05-03T00:00:00.000Z" }

  - match: { aggregations.date_terms.buckets.0.doc_count: 2 }

  - do:
      search:
        body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "exclude" : [ "2016-05-03" ] } } } }

  - match: { hits.total: 3 }

  - length: { aggregations.date_terms.buckets: 1 }

  - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" }

  - match: { aggregations.date_terms.buckets.0.doc_count: 1 }

@@ -121,3 +121,28 @@
  - is_false: aggregations.ip_terms.buckets.0.key_as_string

  - match: { aggregations.ip_terms.buckets.0.doc_count: 1 }

  - do:
      search:
        body: { "query" : { "exists" : { "field" : "ip" } }, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "min_doc_count" : 1, "include" : [ "::1" ] } } } }

  - match: { hits.total: 1 }

  - length: { aggregations.ip_terms.buckets: 1 }

  - match: { aggregations.ip_terms.buckets.0.key: "::1" }

  - do:
      search:
        body: { "query" : { "exists" : { "field" : "ip" } }, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "min_doc_count" : 1, "exclude" : [ "::1" ] } } } }

  - match: { hits.total: 1 }

  - length: { aggregations.ip_terms.buckets: 0 }

  - do:
      catch: request
      search:
        body: { "size" : 0, "aggs" : { "ip_terms" : { "significant_terms" : { "field" : "ip", "exclude" : "127.*" } } } }