commit 5c6cdb90ad
Merge branch 'master' into feature/rank-eval
@@ -20,8 +20,10 @@
 package org.elasticsearch.action.fieldstats;
 
+import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.StringHelper;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -45,9 +47,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     private long sumTotalTermFreq;
     private boolean isSearchable;
     private boolean isAggregatable;
+    private boolean hasMinMax;
     protected T minValue;
     protected T maxValue;
 
+    /**
+     * Builds a FieldStats where min and max value are not available for the field.
+     * @param type The native type of this FieldStats
+     * @param maxDoc Max number of docs
+     * @param docCount the number of documents that have at least one term for this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
+     * or -1 if this measure isn't available for this field.
+     * @param isSearchable true if this field is searchable
+     * @param isAggregatable true if this field is aggregatable
+     */
+    FieldStats(byte type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+               boolean isSearchable, boolean isAggregatable) {
+        this.type = type;
+        this.maxDoc = maxDoc;
+        this.docCount = docCount;
+        this.sumDocFreq = sumDocFreq;
+        this.sumTotalTermFreq = sumTotalTermFreq;
+        this.isSearchable = isSearchable;
+        this.isAggregatable = isAggregatable;
+        this.hasMinMax = false;
+    }
+
+    /**
+     * Builds a FieldStats with min and max value for the field.
+     * @param type The native type of this FieldStats
+     * @param maxDoc Max number of docs
+     * @param docCount the number of documents that have at least one term for this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
+     * or -1 if this measure isn't available for this field.
+     * @param isSearchable true if this field is searchable
+     * @param isAggregatable true if this field is aggregatable
+     * @param minValue the minimum value indexed in this field
+     * @param maxValue the maximum value indexed in this field
+     */
     FieldStats(byte type,
                long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
@@ -60,6 +103,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         this.sumTotalTermFreq = sumTotalTermFreq;
         this.isSearchable = isSearchable;
         this.isAggregatable = isAggregatable;
+        this.hasMinMax = true;
         this.minValue = minValue;
         this.maxValue = maxValue;
     }

@@ -85,6 +129,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         }
     }
 
+    /**
+     * @return true if min/max information is available for this field
+     */
+    public boolean hasMinMax() {
+        return hasMinMax;
+    }
+
     /**
      * @return the total number of documents.
      *
@@ -216,7 +267,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         isAggregatable |= other.isAggregatable;
 
         assert type == other.getType();
+        if (hasMinMax && other.hasMinMax) {
         updateMinMax((T) other.minValue, (T) other.maxValue);
+        } else {
+            hasMinMax = false;
+            minValue = null;
+            maxValue = null;
+        }
     }
 
     private void updateMinMax(T min, T max) {

@@ -241,7 +298,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq);
         builder.field(SEARCHABLE_FIELD, isSearchable);
         builder.field(AGGREGATABLE_FIELD, isAggregatable);
+        if (hasMinMax) {
         toInnerXContent(builder);
+        }
         builder.endObject();
         return builder;
     }
@@ -262,8 +321,15 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         out.writeLong(sumTotalTermFreq);
         out.writeBoolean(isSearchable);
         out.writeBoolean(isAggregatable);
+        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+            out.writeBoolean(hasMinMax);
+            if (hasMinMax) {
         writeMinMax(out);
+            }
+        } else {
+            writeMinMax(out);
+        }
     }
 
     protected abstract void writeMinMax(StreamOutput out) throws IOException;
 
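Note: this write path pairs with the version-gated read path in the readFrom switch further down — 5.2.0+ streams carry an explicit hasMinMax flag, while older streams always carry min/max. A minimal, self-contained sketch of the wire-compatibility pattern (plain java.io stand-ins, not the ES StreamInput/StreamOutput classes; the version constant is illustrative):

    import java.io.*;

    class VersionedMinMax {
        static final int V_5_2_0 = 5_02_00; // illustrative version ordinal

        static void write(DataOutputStream out, int peerVersion, boolean hasMinMax, long min, long max)
                throws IOException {
            if (peerVersion >= V_5_2_0) {
                out.writeBoolean(hasMinMax);          // new peers get an explicit presence flag
                if (hasMinMax) {
                    out.writeLong(min);
                    out.writeLong(max);
                }
            } else {
                out.writeLong(min);                   // old peers always expect min/max on the wire
                out.writeLong(max);
            }
        }

        static long[] read(DataInputStream in, int peerVersion) throws IOException {
            boolean hasMinMax = true;                 // senders older than 5.2 always wrote min/max
            if (peerVersion >= V_5_2_0) {
                hasMinMax = in.readBoolean();
            }
            return hasMinMax ? new long[] { in.readLong(), in.readLong() } : null;
        }
    }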
@@ -272,6 +338,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
      * otherwise <code>false</code> is returned
      */
     public boolean match(IndexConstraint constraint) {
+        if (hasMinMax == false) {
+            return false;
+        }
         int cmp;
         T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
         if (constraint.getProperty() == IndexConstraint.Property.MIN) {

@@ -310,6 +379,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
         if (isSearchable != that.isSearchable) return false;
         if (isAggregatable != that.isAggregatable) return false;
+        if (hasMinMax != that.hasMinMax) return false;
+        if (hasMinMax == false) {
+            return true;
+        }
         if (!minValue.equals(that.minValue)) return false;
         return maxValue.equals(that.maxValue);
@@ -318,10 +391,16 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     @Override
     public int hashCode() {
         return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
-            minValue, maxValue);
+            hasMinMax, minValue, maxValue);
     }
 
     public static class Long extends FieldStats<java.lang.Long> {
+        public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
         public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     long minValue, long maxValue) {

@@ -357,6 +436,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     }
 
     public static class Double extends FieldStats<java.lang.Double> {
+        public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                      boolean isSearchable, boolean isAggregatable) {
+            super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
+        }
+
         public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                       boolean isSearchable, boolean isAggregatable,
                       double minValue, double maxValue) {

@@ -397,6 +481,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     public static class Date extends FieldStats<java.lang.Long> {
         private FormatDateTimeFormatter formatter;
 
+        public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
+            this.formatter = null;
+        }
+
         public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     FormatDateTimeFormatter formatter,
@@ -439,23 +529,27 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public boolean equals(Object o) {
             if (this == o) return true;
             if (o == null || getClass() != o.getClass()) return false;
             if (!super.equals(o)) return false;
 
             Date that = (Date) o;
-            return Objects.equals(formatter.format(), that.formatter.format());
+            return Objects.equals(formatter == null ? null : formatter.format(),
+                that.formatter == null ? null : that.formatter.format());
         }
 
         @Override
         public int hashCode() {
             int result = super.hashCode();
-            result = 31 * result + formatter.format().hashCode();
+            result = 31 * result + (formatter == null ? 0 : formatter.format().hashCode());
             return result;
         }
     }
 
     public static class Text extends FieldStats<BytesRef> {
+        public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
         public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     BytesRef minValue, BytesRef maxValue) {

@@ -501,6 +595,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     }
 
     public static class Ip extends FieldStats<InetAddress> {
+        public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                  boolean isSearchable, boolean isAggregatable) {
+            super((byte) 4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
+
         public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                   boolean isSearchable, boolean isAggregatable,
                   InetAddress minValue, InetAddress maxValue) {
@@ -550,27 +651,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         long sumTotalTermFreq = in.readLong();
         boolean isSearchable = in.readBoolean();
         boolean isAggregatable = in.readBoolean();
 
+        boolean hasMinMax = true;
+        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+            hasMinMax = in.readBoolean();
+        }
         switch (type) {
             case 0:
+                if (hasMinMax) {
                 return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                     isSearchable, isAggregatable, in.readLong(), in.readLong());
+                } else {
+                    return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 1:
+                if (hasMinMax) {
                 return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                     isSearchable, isAggregatable, in.readDouble(), in.readDouble());
+                } else {
+                    return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 2:
+                if (hasMinMax) {
                 FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
                 return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                     isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
+                } else {
+                    return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 3:
+                if (hasMinMax) {
                 return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                     isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
+                } else {
+                    return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
 
             case 4:
+                if (hasMinMax == false) {
+                    return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
                 int l1 = in.readByte();
                 byte[] b1 = new byte[l1];
                 in.readBytes(b1, 0, l1);
@@ -599,5 +723,4 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     private static final String MIN_VALUE_AS_STRING_FIELD = "min_value_as_string";
     private static final String MAX_VALUE_FIELD = "max_value";
     private static final String MAX_VALUE_AS_STRING_FIELD = "max_value_as_string";
-
 }

@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.fieldstats;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Nullable;
@@ -91,12 +92,23 @@ public class FieldStatsResponse extends BroadcastResponse {
         out.writeVInt(indicesMergedFieldStats.size());
         for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
             out.writeString(entry1.getKey());
-            out.writeVInt(entry1.getValue().size());
+            int size = entry1.getValue().size();
+            if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+                // filter fieldstats without min/max information
+                for (FieldStats stats : entry1.getValue().values()) {
+                    if (stats.hasMinMax() == false) {
+                        size--;
+                    }
+                }
+            }
+            out.writeVInt(size);
             for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
+                if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
                 out.writeString(entry2.getKey());
                 entry2.getValue().writeTo(out);
+                }
             }
         }
         out.writeVInt(conflicts.size());
         for (Map.Entry<String, String> entry : conflicts.entrySet()) {
             out.writeString(entry.getKey());

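The block above has to pre-count before writing: the vInt length prefix must match the number of entries actually serialized once stats without min/max are filtered out for pre-5.2 peers. The same count-then-write shape as a self-contained sketch (plain java.io, illustrative types):

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Map;

    class FilteredLengthPrefix {
        static void write(DataOutputStream out, Map<String, Boolean> hasMinMaxByField, boolean peerIsPre52)
                throws IOException {
            int size = hasMinMaxByField.size();
            if (peerIsPre52) {
                for (boolean hasMinMax : hasMinMaxByField.values()) {
                    if (hasMinMax == false) {
                        size--;                      // skipped in the loop below, so not counted here
                    }
                }
            }
            out.writeInt(size);                      // prefix now agrees with the filtered loop
            for (Map.Entry<String, Boolean> e : hasMinMaxByField.entrySet()) {
                if (e.getValue() || peerIsPre52 == false) {
                    out.writeUTF(e.getKey());
                }
            }
        }
    }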
|
@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
|
|||
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
|
@@ -55,6 +56,13 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
 
     @Override
     protected ShardIterator shards(ClusterState state, InternalRequest request) {
+        if (request.request().doc() != null && request.request().routing() == null) {
+            // artificial document without routing specified: ignore its "id" and route to a random shard
+            // or to the one selected by the preference
+            GroupShardsIterator groupShardsIter = clusterService.operationRouting().searchShards(state,
+                new String[] { request.concreteIndex() }, null, request.request().preference());
+            return groupShardsIter.iterator().next();
+        }
+
         return clusterService.operationRouting().getShards(state, request.concreteIndex(), request.request().id(),
             request.request().routing(), request.request().preference());
     }

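This routing change is what makes the "_shards" preference usable with artificial documents; the new testArtificialDocWithPreference test at the end of this diff drives it exactly this way:

    TermVectorsResponse tvResponse = client().prepareTermVectors()
        .setIndex("test")
        .setType("type1")
        .setPreference("_shards:" + shardId)
        .setDoc(jsonBuilder().startObject().field("field1", "random permutation").endObject())
        .setFieldStatistics(true)
        .setTermStatistics(true)
        .get();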
@@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 

@@ -38,6 +39,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 /**
  * Utility class to build global ordinals.
@@ -48,7 +50,9 @@ public enum GlobalOrdinalsBuilder {
     /**
      * Build global ordinals for the provided {@link IndexReader}.
     */
-    public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger) throws IOException {
+    public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData,
+            IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger,
+            Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) throws IOException {
         assert indexReader.leaves().size() > 1;
         long startTimeNS = System.nanoTime();
 
@@ -71,7 +75,7 @@ public enum GlobalOrdinalsBuilder {
             );
         }
         return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
-            atomicFD, ordinalMap, memorySizeInBytes
+            atomicFD, ordinalMap, memorySizeInBytes, scriptFunction
         );
     }
 

@@ -81,7 +85,7 @@ public enum GlobalOrdinalsBuilder {
         final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
         final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
         for (int i = 0; i < indexReader.leaves().size(); ++i) {
-            atomicFD[i] = new AbstractAtomicOrdinalsFieldData() {
+            atomicFD[i] = new AbstractAtomicOrdinalsFieldData(AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION) {
                 @Override
                 public RandomAccessOrds getOrdinalsValues() {
                     return DocValues.emptySortedSet();

@@ -105,7 +109,7 @@ public enum GlobalOrdinalsBuilder {
         }
         final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT);
         return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
-            atomicFD, ordinalMap, 0
+            atomicFD, ordinalMap, 0, AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION
         );
     }
 

@@ -24,9 +24,11 @@ import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.util.Accountable;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
 
 import java.util.Collection;
+import java.util.function.Function;
 
 /**
  * {@link org.elasticsearch.index.fielddata.IndexFieldData} impl based on global ordinals.

@@ -34,13 +36,16 @@ import java.util.Collection;
 final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFieldData {
 
     private final Atomic[] atomicReaders;
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
 
-    InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) {
+    InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd,
+            OrdinalMap ordinalMap, long memorySizeInBytes, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
         super(indexSettings, fieldName, memorySizeInBytes);
         this.atomicReaders = new Atomic[segmentAfd.length];
         for (int i = 0; i < segmentAfd.length; i++) {
             atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i);
         }
+        this.scriptFunction = scriptFunction;
     }
 
     @Override

@@ -55,6 +60,7 @@ final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFiel
         private final int segmentIndex;
 
         private Atomic(AtomicOrdinalsFieldData afd, OrdinalMap ordinalMap, int segmentIndex) {
+            super(scriptFunction);
             this.afd = afd;
             this.ordinalMap = ordinalMap;
             this.segmentIndex = segmentIndex;

@@ -29,13 +29,24 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 
 import java.util.Collection;
 import java.util.Collections;
+import java.util.function.Function;
 
 
 public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsFieldData {
 
+    public static final Function<RandomAccessOrds, ScriptDocValues<?>> DEFAULT_SCRIPT_FUNCTION =
+        ((Function<RandomAccessOrds, SortedBinaryDocValues>) FieldData::toString)
+            .andThen(ScriptDocValues.Strings::new);
+
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
+
+    protected AbstractAtomicOrdinalsFieldData(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
+        this.scriptFunction = scriptFunction;
+    }
+
     @Override
-    public final ScriptDocValues getScriptValues() {
-        return new ScriptDocValues.Strings(getBytesValues());
+    public final ScriptDocValues<?> getScriptValues() {
+        return scriptFunction.apply(getOrdinalsValues());
     }
 
     @Override
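DEFAULT_SCRIPT_FUNCTION above is built by composing two stages with Function.andThen; the cast on the first stage is what gives the compiler a target type for the method reference. The same trick in miniature with plain JDK types (all names here are illustrative):

    import java.util.function.Function;

    class Composed {
        // first stage: ordinals -> joined text; second stage: wrap/convert the result
        static final Function<int[], String> DEFAULT =
            ((Function<int[], StringBuilder>) ords -> {
                StringBuilder sb = new StringBuilder();
                for (int o : ords) {
                    sb.append(o).append(' ');
                }
                return sb;
            }).andThen(StringBuilder::toString);

        public static void main(String[] args) {
            System.out.println(DEFAULT.apply(new int[] {1, 2, 3})); // prints "1 2 3 "
        }
    }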
@@ -44,7 +55,7 @@ public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsF
     }
 
     public static AtomicOrdinalsFieldData empty() {
-        return new AbstractAtomicOrdinalsFieldData() {
+        return new AbstractAtomicOrdinalsFieldData(DEFAULT_SCRIPT_FUNCTION) {
 
             @Override
             public long ramBytesUsed() {
@@ -97,7 +97,8 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
 
     @Override
     public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
-        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);
+        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger,
+            AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);
     }
 
     @Override

@@ -21,12 +21,14 @@ package org.elasticsearch.index.fielddata.plain;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;

@@ -34,6 +36,7 @@ import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 
 import java.util.Set;
+import java.util.function.Function;
 
 import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.common.util.set.Sets.newHashSet;
@@ -72,12 +75,18 @@ public abstract class DocValuesIndexFieldData {
     private static final Set<String> BINARY_INDEX_FIELD_NAMES = unmodifiableSet(newHashSet(UidFieldMapper.NAME, IdFieldMapper.NAME));
 
     private NumericType numericType;
+    private Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION;
 
     public Builder numericType(NumericType type) {
         this.numericType = type;
         return this;
     }
 
+    public Builder scriptFunction(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
+        this.scriptFunction = scriptFunction;
+        return this;
+    }
+
     @Override
     public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
                                    CircuitBreakerService breakerService, MapperService mapperService) {
@@ -89,7 +98,7 @@ public abstract class DocValuesIndexFieldData {
         } else if (numericType != null) {
             return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType);
         } else {
-            return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService);
+            return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService, scriptFunction);
         }
     }
 

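The scriptFunction threaded through this builder is consumed further down in this same diff; IpFieldMapper wires in its IP-aware implementation like so:

    return new DocValuesIndexFieldData.Builder().scriptFunction(IpScriptDocValues::new);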
@@ -56,6 +56,7 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
         private final String index;
 
         IndexAtomicFieldData(String index) {
+            super(DEFAULT_SCRIPT_FUNCTION);
             this.index = index;
         }
 

@@ -38,6 +38,7 @@ public class PagedBytesAtomicFieldData extends AbstractAtomicOrdinalsFieldData {
     protected final Ordinals ordinals;
 
     public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, PackedLongValues termOrdToBytesOffset, Ordinals ordinals) {
+        super(DEFAULT_SCRIPT_FUNCTION);
         this.bytes = bytes;
         this.termOrdToBytesOffset = termOrdToBytesOffset;
         this.ordinals = ordinals;

@@ -25,10 +25,12 @@ import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.util.Accountable;
 import org.elasticsearch.index.fielddata.AtomicFieldData;
 import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.function.Function;
 
 /**
  * An {@link AtomicFieldData} implementation that uses Lucene {@link org.apache.lucene.index.SortedSetDocValues}.

@@ -38,7 +40,9 @@ public final class SortedSetDVBytesAtomicFieldData extends AbstractAtomicOrdinal
     private final LeafReader reader;
     private final String field;
 
-    SortedSetDVBytesAtomicFieldData(LeafReader reader, String field) {
+    SortedSetDVBytesAtomicFieldData(LeafReader reader, String field,
+            Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
+        super(scriptFunction);
         this.reader = reader;
         this.field = field;
     }

@@ -21,6 +21,7 @@ package org.elasticsearch.index.fielddata.plain;
 
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;

@@ -28,24 +29,29 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
 import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.search.MultiValueMode;
 
 import java.io.IOException;
+import java.util.function.Function;
 
 public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData implements IndexOrdinalsFieldData {
 
     private final IndexSettings indexSettings;
     private final IndexFieldDataCache cache;
     private final CircuitBreakerService breakerService;
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
 
-    public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName, CircuitBreakerService breakerService) {
+    public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName,
+            CircuitBreakerService breakerService, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
         super(indexSettings.getIndex(), fieldName);
         this.indexSettings = indexSettings;
         this.cache = cache;
         this.breakerService = breakerService;
+        this.scriptFunction = scriptFunction;
     }
 
     @Override
@@ -55,7 +61,7 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i
 
     @Override
     public AtomicOrdinalsFieldData load(LeafReaderContext context) {
-        return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName);
+        return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName, scriptFunction);
     }
 
     @Override

@@ -100,6 +106,6 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i
 
     @Override
     public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
-        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);
+        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger, scriptFunction);
     }
 }

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
@@ -299,9 +300,13 @@ public class DateFieldMapper extends FieldMapper {
     @Override
     public FieldStats.Date stats(IndexReader reader) throws IOException {
         String field = name();
+        FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+        if (fi == null) {
+            return null;
+        }
         long size = PointValues.size(reader, field);
         if (size == 0) {
-            return null;
+            return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
         }
         int docCount = PointValues.getDocCount(reader, field);
         byte[] min = PointValues.getMinPackedValue(reader, field);

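All of the stats(...) overrides touched in this diff follow the same three-way ladder, which is the behavioral point of the change: an unknown field still yields null, but a mapped field with no indexed points now yields a FieldStats whose hasMinMax() is false instead of null. Sketched with the calls used above (not the literal method body):

    FieldInfo fi = MultiFields.getMergedFieldInfos(reader).fieldInfo(field);
    if (fi == null) {
        return null;                 // field unknown in this index: no stats at all
    }
    long size = PointValues.size(reader, field);
    if (size == 0) {
        // field mapped but empty: flags only, so hasMinMax() is false downstream
        return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
    }
    // non-empty: decode min/max from the packed point values and build full stats
    int docCount = PointValues.getDocCount(reader, field);
    byte[] min = PointValues.getMinPackedValue(reader, field);
    byte[] max = PointValues.getMaxPackedValue(reader, field);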
@@ -23,10 +23,12 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;

@@ -38,6 +40,8 @@ import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.search.DocValueFormat;

@@ -45,8 +49,13 @@ import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.AbstractList;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
+import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
 
 /** A {@link FieldMapper} for ip addresses. */
@@ -213,9 +222,13 @@ public class IpFieldMapper extends FieldMapper {
     @Override
     public FieldStats.Ip stats(IndexReader reader) throws IOException {
         String field = name();
+        FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+        if (fi == null) {
+            return null;
+        }
         long size = PointValues.size(reader, field);
         if (size == 0) {
-            return null;
+            return new FieldStats.Ip(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
         }
         int docCount = PointValues.getDocCount(reader, field);
         byte[] min = PointValues.getMinPackedValue(reader, field);
@@ -225,10 +238,50 @@ public class IpFieldMapper extends FieldMapper {
             InetAddressPoint.decode(min), InetAddressPoint.decode(max));
     }
 
+    private static class IpScriptDocValues extends AbstractList<String> implements ScriptDocValues<String> {
+
+        private final RandomAccessOrds values;
+
+        IpScriptDocValues(RandomAccessOrds values) {
+            this.values = values;
+        }
+
+        @Override
+        public void setNextDocId(int docId) {
+            values.setDocument(docId);
+        }
+
+        public String getValue() {
+            if (isEmpty()) {
+                return null;
+            } else {
+                return get(0);
+            }
+        }
+
+        @Override
+        public List<String> getValues() {
+            return Collections.unmodifiableList(this);
+        }
+
+        @Override
+        public String get(int index) {
+            BytesRef encoded = values.lookupOrd(values.ordAt(index));
+            InetAddress address = InetAddressPoint.decode(
+                Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length));
+            return InetAddresses.toAddrString(address);
+        }
+
+        @Override
+        public int size() {
+            return values.cardinality();
+        }
+    }
+
     @Override
     public IndexFieldData.Builder fielddataBuilder() {
         failIfNoDocValues();
-        return new DocValuesIndexFieldData.Builder();
+        return new DocValuesIndexFieldData.Builder().scriptFunction(IpScriptDocValues::new);
     }
 
     @Override

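A small round-trip sketch of the decode path IpScriptDocValues relies on — doc values hold the 16-byte Lucene point encoding, and InetAddresses.toAddrString formats the address without the reverse-DNS lookup that InetAddress.toString() can trigger (the sample address is illustrative):

    import org.apache.lucene.document.InetAddressPoint;
    import org.elasticsearch.common.network.InetAddresses;

    import java.net.InetAddress;

    class IpRoundTrip {
        public static void main(String[] args) {
            InetAddress original = InetAddresses.forString("192.168.1.1");
            byte[] encoded = InetAddressPoint.encode(original);      // IPv4 widened to the 16-byte form
            InetAddress decoded = InetAddressPoint.decode(encoded);
            System.out.println(InetAddresses.toAddrString(decoded)); // 192.168.1.1
        }
    }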
@@ -20,18 +20,19 @@
 package org.elasticsearch.index.mapper;
 
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.joda.DateMathParser;

@@ -375,14 +376,16 @@ public abstract class MappedFieldType extends FieldType {
      */
     public FieldStats stats(IndexReader reader) throws IOException {
         int maxDoc = reader.maxDoc();
-        Terms terms = MultiFields.getTerms(reader, name());
-        if (terms == null) {
+        FieldInfo fi = MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+        if (fi == null) {
             return null;
         }
+        Terms terms = MultiFields.getTerms(reader, name());
+        if (terms == null) {
+            return new FieldStats.Text(maxDoc, 0, -1, -1, isSearchable(), isAggregatable());
+        }
         FieldStats stats = new FieldStats.Text(maxDoc, terms.getDocCount(),
-            terms.getSumDocFreq(), terms.getSumTotalTermFreq(),
-            isSearchable(), isAggregatable(),
-            terms.getMin(), terms.getMax());
+            terms.getSumDocFreq(), terms.getSumTotalTermFreq(), isSearchable(), isAggregatable(), terms.getMin(), terms.getMax());
         return stats;
     }
 

@@ -27,6 +27,7 @@ import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
@@ -227,14 +228,18 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);
             byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
-            return new FieldStats.Double(reader.maxDoc(),docCount, -1L, size,
+            return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 HalfFloatPoint.decodeDimension(min, 0), HalfFloatPoint.decodeDimension(max, 0));
         }

@@ -311,9 +316,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -395,9 +404,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -613,9 +626,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Long(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -709,9 +726,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Long(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -65,6 +65,7 @@ import org.apache.lucene.analysis.util.ElisionFilter;
 import org.elasticsearch.Version;
 import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
 import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
+import org.elasticsearch.index.analysis.MultiTermAwareComponent;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
 import org.tartarus.snowball.ext.DutchStemmer;
@@ -112,6 +113,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new ASCIIFoldingFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     LENGTH(CachingStrategy.LUCENE) {

@@ -133,6 +138,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new LowerCaseFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     UPPERCASE(CachingStrategy.LUCENE) {

@@ -140,6 +149,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new UpperCaseFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     KSTEM(CachingStrategy.ONE) {

@@ -218,6 +231,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new ElisionFilter(tokenStream, FrenchAnalyzer.DEFAULT_ARTICLES);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     ARABIC_STEM(CachingStrategy.ONE) {

@@ -281,6 +298,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new ArabicNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     PERSIAN_NORMALIZATION(CachingStrategy.ONE) {

@@ -288,6 +309,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new PersianNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     TYPE_AS_PAYLOAD(CachingStrategy.ONE) {

@@ -309,6 +334,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new GermanNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     HINDI_NORMALIZATION(CachingStrategy.ONE) {

@@ -316,6 +345,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new HindiNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     INDIC_NORMALIZATION(CachingStrategy.ONE) {

@@ -323,6 +356,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new IndicNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     SORANI_NORMALIZATION(CachingStrategy.ONE) {

@@ -330,6 +367,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new SoraniNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     SCANDINAVIAN_NORMALIZATION(CachingStrategy.ONE) {

@@ -337,6 +378,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new ScandinavianNormalizationFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     SCANDINAVIAN_FOLDING(CachingStrategy.ONE) {

@@ -344,6 +389,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new ScandinavianFoldingFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     APOSTROPHE(CachingStrategy.ONE) {

@@ -358,6 +407,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new CJKWidthFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     DECIMAL_DIGIT(CachingStrategy.ONE) {

@@ -365,6 +418,10 @@ public enum PreBuiltTokenFilters {
             public TokenStream create(TokenStream tokenStream, Version version) {
                 return new DecimalDigitFilter(tokenStream);
             }
+            @Override
+            protected boolean isMultiTermAware() {
+                return true;
+            }
         },
 
     CJK_BIGRAM(CachingStrategy.ONE) {
@@ -390,6 +447,10 @@ public enum PreBuiltTokenFilters {
 
     ;
 
+    protected boolean isMultiTermAware() {
+        return false;
+    }
+
     public abstract TokenStream create(TokenStream tokenStream, Version version);
 
     protected final PreBuiltCacheFactory.PreBuiltCache<TokenFilterFactory> cache;
@@ -399,21 +460,42 @@ public enum PreBuiltTokenFilters {
         cache = PreBuiltCacheFactory.getCache(cachingStrategy);
     }
 
+    private interface MultiTermAwareTokenFilterFactory extends TokenFilterFactory, MultiTermAwareComponent {}
+
     public synchronized TokenFilterFactory getTokenFilterFactory(final Version version) {
         TokenFilterFactory factory = cache.get(version);
         if (factory == null) {
-            final String finalName = name();
-            factory = new TokenFilterFactory() {
+            final String finalName = name().toLowerCase(Locale.ROOT);
+            if (isMultiTermAware()) {
+                factory = new MultiTermAwareTokenFilterFactory() {
                     @Override
                     public String name() {
-                        return finalName.toLowerCase(Locale.ROOT);
+                        return finalName;
                     }
 
                     @Override
                     public TokenStream create(TokenStream tokenStream) {
-                        return valueOf(finalName).create(tokenStream, version);
+                        return PreBuiltTokenFilters.this.create(tokenStream, version);
                     }
+
+                    @Override
+                    public Object getMultiTermComponent() {
+                        return this;
+                    }
+                };
+            } else {
+                factory = new TokenFilterFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public TokenStream create(TokenStream tokenStream) {
+                        return PreBuiltTokenFilters.this.create(tokenStream, version);
+                    }
+                };
+            }
             cache.put(version, factory);
         }
 

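The rewrite above hangs everything off a protected hook with a default implementation; each multi-term-aware constant just overrides it. The pattern in isolation (an illustrative enum, not the ES class):

    import java.util.Locale;

    enum Filters {
        LOWERCASE {
            @Override
            boolean isMultiTermAware() {
                return true;
            }
        },
        LENGTH; // keeps the default: not multi-term aware

        boolean isMultiTermAware() {
            return false;
        }

        public static void main(String[] args) {
            for (Filters f : values()) {
                System.out.println(f.name().toLowerCase(Locale.ROOT) + " -> " + f.isMultiTermAware());
            }
        }
    }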
@@ -33,6 +33,8 @@ import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
 import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.analysis.MultiTermAwareComponent;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
 

@@ -87,6 +89,10 @@ public enum PreBuiltTokenizers {
             protected Tokenizer create(Version version) {
                 return new LowerCaseTokenizer();
             }
+            @Override
+            protected TokenFilterFactory getMultiTermComponent(Version version) {
+                return PreBuiltTokenFilters.LOWERCASE.getTokenFilterFactory(version);
+            }
         },
 
     WHITESPACE(CachingStrategy.LUCENE) {

@@ -128,6 +134,10 @@ public enum PreBuiltTokenizers {
 
     protected abstract Tokenizer create(Version version);
 
+    protected TokenFilterFactory getMultiTermComponent(Version version) {
+        return null;
+    }
+
     protected final PreBuiltCacheFactory.PreBuiltCache<TokenizerFactory> cache;
 
 
@@ -135,22 +145,42 @@ public enum PreBuiltTokenizers {
         cache = PreBuiltCacheFactory.getCache(cachingStrategy);
     }
 
+    private interface MultiTermAwareTokenizerFactory extends TokenizerFactory, MultiTermAwareComponent {}
+
     public synchronized TokenizerFactory getTokenizerFactory(final Version version) {
         TokenizerFactory tokenizerFactory = cache.get(version);
         if (tokenizerFactory == null) {
-            final String finalName = name();
-
-            tokenizerFactory = new TokenizerFactory() {
+            final String finalName = name().toLowerCase(Locale.ROOT);
+            if (getMultiTermComponent(version) != null) {
+                tokenizerFactory = new MultiTermAwareTokenizerFactory() {
                     @Override
                     public String name() {
-                        return finalName.toLowerCase(Locale.ROOT);
+                        return finalName;
                     }
 
                     @Override
                     public Tokenizer create() {
-                        return valueOf(finalName).create(version);
+                        return PreBuiltTokenizers.this.create(version);
                     }
+
+                    @Override
+                    public Object getMultiTermComponent() {
+                        return PreBuiltTokenizers.this.getMultiTermComponent(version);
+                    }
+                };
+            } else {
+                tokenizerFactory = new TokenizerFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public Tokenizer create() {
+                        return PreBuiltTokenizers.this.create(version);
+                    }
+                };
+            }
             cache.put(version, tokenizerFactory);
         }
 

@@ -45,7 +45,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
     @TestLogging("_root:DEBUG")
     public void testDelayShards() throws Exception {
         logger.info("--> starting 3 nodes");
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
 
         // Wait for all 3 nodes to be up
         logger.info("--> waiting for 3 nodes to be up");

@@ -162,7 +162,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
     }
 
     public void testValuesSmokeScreen() throws IOException, ExecutionException, InterruptedException {
-        internalCluster().startNodesAsync(randomIntBetween(1, 3)).get();
+        internalCluster().startNodes(randomIntBetween(1, 3));
         index("test1", "type", "1", "f", "f");
 
         ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();

@@ -202,7 +202,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
 
     public void testAllocatedProcessors() throws Exception {
         // start one node with 7 processors.
-        internalCluster().startNodesAsync(Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 7).build()).get();
+        internalCluster().startNode(Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 7).build());
         waitForNodes(1);
 
         ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();

@@ -75,7 +75,7 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase {
             .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
             .build();
 
-        internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get();
+        internalCluster().startMasterOnlyNodes(3, sharedSettings);
 
         String dataNode = internalCluster().startDataOnlyNode(sharedSettings);
 

@@ -20,6 +20,7 @@
 package org.elasticsearch.action.termvectors;
 
 import com.carrotsearch.hppc.ObjectIntHashMap;
 
 import org.apache.lucene.analysis.payloads.PayloadHelper;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DirectoryReader;

@@ -30,6 +31,7 @@ import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.common.Strings;

@@ -42,6 +44,7 @@ import org.elasticsearch.index.mapper.FieldMapper;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;

@@ -49,6 +52,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -1170,6 +1174,48 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
         }
     }
 
+    public void testArtificialDocWithPreference() throws ExecutionException, InterruptedException, IOException {
+        // setup indices
+        Settings.Builder settings = Settings.builder()
+            .put(indexSettings())
+            .put("index.analysis.analyzer", "standard");
+        assertAcked(prepareCreate("test")
+            .setSettings(settings)
+            .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets"));
+        ensureGreen();
+
+        // index document
+        indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "random permutation"));
+
+        // Get search shards
+        ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards("test").get();
+        List<Integer> shardIds = Arrays.stream(searchShardsResponse.getGroups()).map(s -> s.getShardId().id()).collect(Collectors.toList());
+
+        // request termvectors of artificial document from each shard
+        int sumTotalTermFreq = 0;
+        int sumDocFreq = 0;
+        for (Integer shardId : shardIds) {
+            TermVectorsResponse tvResponse = client().prepareTermVectors()
+                .setIndex("test")
+                .setType("type1")
+                .setPreference("_shards:" + shardId)
+                .setDoc(jsonBuilder().startObject().field("field1", "random permutation").endObject())
+                .setFieldStatistics(true)
+                .setTermStatistics(true)
+                .get();
+            Fields fields = tvResponse.getFields();
+            Terms terms = fields.terms("field1");
+            assertNotNull(terms);
+            TermsEnum termsEnum = terms.iterator();
+            while (termsEnum.next() != null) {
+                sumTotalTermFreq += termsEnum.totalTermFreq();
+                sumDocFreq += termsEnum.docFreq();
+            }
+        }
+        assertEquals("expected to find term statistics in exactly one shard!", 2, sumTotalTermFreq);
+        assertEquals("expected to find term statistics in exactly one shard!", 2, sumDocFreq);
+    }
+
     private void checkBestTerms(Terms terms, List<String> expectedTerms) throws IOException {
         final TermsEnum termsEnum = terms.iterator();
         List<String> bestTerms = new ArrayList<>();

@@ -61,7 +61,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.OldIndexUtils;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

@@ -129,24 +128,23 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
     }

     void setupCluster() throws Exception {
-        InternalTestCluster.Async<List<String>> replicas = internalCluster().startNodesAsync(1); // for replicas
+        List<String> replicas = internalCluster().startNodes(1); // for replicas

         Path baseTempDir = createTempDir();
         // start single data path node
         Settings.Builder nodeSettings = Settings.builder()
             .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath())
             .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
-        InternalTestCluster.Async<String> singleDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+        singleDataPathNodeName = internalCluster().startNode(nodeSettings);

         // start multi data path node
         nodeSettings = Settings.builder()
             .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir
                 .resolve("multi-path2").toAbsolutePath())
             .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
-        InternalTestCluster.Async<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+        multiDataPathNodeName = internalCluster().startNode(nodeSettings);

         // find single data path dir
-        singleDataPathNodeName = singleDataPathNode.get();
         Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths();
         assertEquals(1, nodePaths.length);
         singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);

@@ -155,7 +153,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         logger.info("--> Single data path: {}", singleDataPath);

         // find multi data path dirs
-        multiDataPathNodeName = multiDataPathNode.get();
         nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
         assertEquals(2, nodePaths.length);
         multiDataPath = new Path[]{nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),

@@ -165,8 +162,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         Files.createDirectories(multiDataPath[0]);
         Files.createDirectories(multiDataPath[1]);
         logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);
-
-        replicas.get(); // wait for replicas
     }

     void upgradeIndexFolder() throws Exception {
@@ -126,7 +126,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
     }

     public void testClusterInfoServiceCollectsInformation() throws Exception {
-        internalCluster().startNodesAsync(2).get();
+        internalCluster().startNodes(2);
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
             .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE).build()));

@@ -174,10 +174,9 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
     }

     public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException {
-        internalCluster().startNodesAsync(2,
+        internalCluster().startNodes(2,
             // manually control publishing
-            Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build())
-            .get();
+            Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build());
         prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
         ensureGreen("test");
         InternalTestCluster internalTestCluster = internalCluster();
@@ -24,7 +24,6 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;

@@ -202,22 +201,19 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             .build();

         logger.info("--> start first 2 nodes");
-        internalCluster().startNodesAsync(2, settings).get();
+        internalCluster().startNodes(2, settings);

         ClusterState state;

-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
+        assertBusy(() -> {
             for (Client client : clients()) {
-                ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
-                assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
-            }
+                ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+                assertThat(state1.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
+            }
         });

         logger.info("--> start two more nodes");
-        internalCluster().startNodesAsync(2, settings).get();
+        internalCluster().startNodes(2, settings);

         ensureGreen();
         ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();

@@ -252,7 +248,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         assertNoMasterBlockOnAllNodes();

         logger.info("--> start back the 2 nodes ");
-        String[] newNodes = internalCluster().startNodesAsync(2, settings).get().toArray(Strings.EMPTY_ARRAY);
+        String[] newNodes = internalCluster().startNodes(2, settings).stream().toArray(String[]::new);

         ensureGreen();
         clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();

@@ -338,7 +334,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {


         logger.info("--> starting [{}] nodes. min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes);
-        internalCluster().startNodesAsync(nodeCount, settings.build()).get();
+        internalCluster().startNodes(nodeCount, settings.build());

         logger.info("--> waiting for nodes to join");
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());

@@ -371,7 +367,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
             .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
             .build();
-        internalCluster().startNodesAsync(3, settings).get();
+        internalCluster().startNodes(3, settings);
         ensureGreen(); // ensure cluster state is recovered before we disrupt things

         final String master = internalCluster().getMasterName();
@@ -27,17 +27,16 @@ import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
-

 import static org.hamcrest.Matchers.equalTo;

 @ClusterScope(scope= Scope.TEST, numDataNodes =0)
 public class UpdateSettingsValidationIT extends ESIntegTestCase {
     public void testUpdateSettingsValidation() throws Exception {
-        internalCluster().startNodesAsync(
+        internalCluster().startNodes(
             Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(),
             Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(),
             Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()
-        ).get();
+        );

         createIndex("test");
         NumShards test = getNumShards("test");
@@ -57,7 +57,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {


         logger.info("--> starting 2 nodes on the same rack");
-        internalCluster().startNodesAsync(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build()).get();
+        internalCluster().startNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build());

         createIndex("test1");
         createIndex("test2");

@@ -107,12 +107,12 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
             .build();

         logger.info("--> starting 4 nodes on different zones");
-        List<String> nodes = internalCluster().startNodesAsync(
+        List<String> nodes = internalCluster().startNodes(
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build()
-        ).get();
+        );
         String A_0 = nodes.get(0);
         String B_0 = nodes.get(1);
         String B_1 = nodes.get(2);

@@ -153,10 +153,10 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
             .build();

         logger.info("--> starting 2 nodes on zones 'a' & 'b'");
-        List<String> nodes = internalCluster().startNodesAsync(
+        List<String> nodes = internalCluster().startNodes(
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()
-        ).get();
+        );
         String A_0 = nodes.get(0);
         String B_0 = nodes.get(1);
         client().admin().indices().prepareCreate("test")
@@ -85,7 +85,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
     }

     private void rerouteWithCommands(Settings commonSettings) throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2, commonSettings).get();
+        List<String> nodesIds = internalCluster().startNodes(2, commonSettings);
         final String node_1 = nodesIds.get(0);
         final String node_2 = nodesIds.get(1);

@@ -304,7 +304,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
     }

     public void testClusterRerouteWithBlocks() throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);

         logger.info("--> create an index with 1 shard and 0 replicas");
         assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)));
@@ -43,7 +43,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

     public void testDecommissionNodeNoReplicas() throws Exception {
         logger.info("--> starting 2 nodes");
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_0 = nodesIds.get(0);
         final String node_1 = nodesIds.get(1);
         assertThat(cluster().size(), equalTo(2));

@@ -82,7 +82,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

     public void testDisablingAllocationFiltering() throws Exception {
         logger.info("--> starting 2 nodes");
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_0 = nodesIds.get(0);
         final String node_1 = nodesIds.get(1);
         assertThat(cluster().size(), equalTo(2));
@@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.junit.annotations.TestLogging;

 import java.util.Collections;
 import java.util.List;

@@ -42,7 +41,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * get allocated to a free node when the node hosting it leaves the cluster.
      */
     public void testNoDelayedTimeout() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -61,7 +60,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * on it before.
      */
     public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -85,7 +84,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * though the node hosting the shard is not coming back.
      */
     public void testDelayedAllocationTimesOut() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -107,7 +106,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * even though the node it was hosted on will not come back.
      */
     public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -133,7 +132,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * even though the node it was hosted on will not come back.
      */
     public void testDelayedAllocationChangeWithSettingTo0() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
@@ -71,7 +71,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
     private void createStaleReplicaScenario() throws Exception {
         logger.info("--> starting 3 nodes, 1 master, 2 data");
         String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
-        internalCluster().startDataOnlyNodesAsync(2).get();
+        internalCluster().startDataOnlyNodes(2);

         assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get());

@@ -267,7 +267,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {

     public void testNotWaitForQuorumCopies() throws Exception {
         logger.info("--> starting 3 nodes");
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         logger.info("--> creating index with 1 primary and 2 replicas");
         assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get());

@@ -289,7 +289,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
      */
     public void testForceAllocatePrimaryOnNoDecision() throws Exception {
         logger.info("--> starting 1 node");
-        final String node = internalCluster().startNodeAsync().get();
+        final String node = internalCluster().startNode();
         logger.info("--> creating index with 1 primary and 0 replicas");
         final String indexName = "test-idx";
         assertAcked(client().admin().indices()
@@ -54,7 +54,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
     }

     public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
-        List<String> nodes = internalCluster().startNodesAsync(3).get();
+        List<String> nodes = internalCluster().startNodes(3);

         // Wait for all 3 nodes to be up
         assertBusy(new Runnable() {
@@ -43,8 +43,8 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.cluster.service.ClusterStateStatus;
 import org.elasticsearch.cluster.service.ClusterServiceState;
+import org.elasticsearch.cluster.service.ClusterStateStatus;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;

@@ -189,11 +189,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     private List<String> startCluster(int numberOfNodes, int minimumMasterNode, @Nullable int[] unicastHostsOrdinals) throws
         ExecutionException, InterruptedException {
         configureCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
-        List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
+        List<String> nodes = internalCluster().startNodes(numberOfNodes);
         ensureStableCluster(numberOfNodes);

         // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }

@@ -853,7 +853,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

         // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
         // includes all the other nodes that have pinged it and the issue doesn't manifest
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }

@@ -890,7 +890,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

         // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
         // includes all the other nodes that have pinged it and the issue doesn't manifest
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }

@@ -1051,11 +1051,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000);

         // don't wait for initial state, wat want to add the disruption while the cluster is forming..
-        internalCluster().startNodesAsync(3,
-            Settings.builder()
-                .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "1ms")
-                .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s")
-                .build()).get();
+        internalCluster().startNodes(3, Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s").build());

         logger.info("applying disruption while cluster is forming ...");

@@ -1110,11 +1106,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
         // don't use DEFAULT settings (which can cause node disconnects on a slow CI machine)
         configureCluster(Settings.EMPTY, 3, null, 1);
-        InternalTestCluster.Async<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync();
-        InternalTestCluster.Async<String> node_1Future = internalCluster().startDataOnlyNodeAsync();
+        final String masterNode = internalCluster().startMasterOnlyNode();
+        final String node_1 = internalCluster().startDataOnlyNode();

-        final String node_1 = node_1Future.get();
-        final String masterNode = masterNodeFuture.get();
         logger.info("--> creating index [test] with one shard and on replica");
         assertAcked(prepareCreate("test").setSettings(
             Settings.builder().put(indexSettings())

@@ -1123,8 +1117,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         );
         ensureGreen("test");

-        InternalTestCluster.Async<String> node_2Future = internalCluster().startDataOnlyNodeAsync();
-        final String node_2 = node_2Future.get();
+        final String node_2 = internalCluster().startDataOnlyNode();
         List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
         for (int i = 0; i < 100; i++) {
             indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("doc").setSource("{\"int_field\":1}"));

@@ -1182,15 +1175,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             .build();
         final String idxName = "test";
         configureCluster(settings, 3, null, 2);
-        InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
-        InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
-        dataNode.get();
-        final List<String> allMasterEligibleNodes = masterNodes.get();
+        final List<String> allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(2);
+        final String dataNode = internalCluster().startDataOnlyNode();
         ensureStableCluster(3);
         assertAcked(prepareCreate("test"));

         final String masterNode1 = internalCluster().getMasterName();
-        NetworkDisruption networkDisruption = new NetworkDisruption(new TwoPartitions(masterNode1, dataNode.get()),
+        NetworkDisruption networkDisruption = new NetworkDisruption(new TwoPartitions(masterNode1, dataNode),
             new NetworkUnresponsive());
         internalCluster().setDisruptionScheme(networkDisruption);
         networkDisruption.startDisrupting();

@@ -1212,21 +1203,21 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

     public void testElectMasterWithLatestVersion() throws Exception {
         configureCluster(3, null, 2);
-        final Set<String> nodes = new HashSet<>(internalCluster().startNodesAsync(3).get());
+        final Set<String> nodes = new HashSet<>(internalCluster().startNodes(3));
         ensureStableCluster(3);
         ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption(new NetworkDisruption.IsolateAllNodes(nodes), new NetworkDisconnect());
         internalCluster().setDisruptionScheme(isolateAllNodes);

         logger.info("--> forcing a complete election to make sure \"preferred\" master is elected");
         isolateAllNodes.startDisrupting();
-        for (String node: nodes) {
+        for (String node : nodes) {
             assertNoMaster(node);
         }
         isolateAllNodes.stopDisrupting();
         internalCluster().clearDisruptionScheme();
         ensureStableCluster(3);
         final String preferredMasterName = internalCluster().getMasterName();
         final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode();
-        for (String node: nodes) {
+        for (String node : nodes) {
             DiscoveryNode discoveryNode = internalCluster().clusterService(node).localNode();
             assertThat(discoveryNode.getId(), greaterThanOrEqualTo(preferredMaster.getId()));
         }

@@ -1252,7 +1243,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

         logger.info("--> forcing a complete election again");
         isolateAllNodes.startDisrupting();
-        for (String node: nodes) {
+        for (String node : nodes) {
             assertNoMaster(node);
         }

@@ -1298,10 +1289,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         }
         final NetworkLinkDisruptionType disruptionType;
         switch (randomInt(2)) {
-            case 0: disruptionType = new NetworkUnresponsive(); break;
-            case 1: disruptionType = new NetworkDisconnect(); break;
-            case 2: disruptionType = NetworkDelay.random(random()); break;
-            default: throw new IllegalArgumentException();
+            case 0:
+                disruptionType = new NetworkUnresponsive();
+                break;
+            case 1:
+                disruptionType = new NetworkDisconnect();
+                break;
+            case 2:
+                disruptionType = NetworkDelay.random(random());
+                break;
+            default:
+                throw new IllegalArgumentException();
         }
         final ServiceDisruptionScheme scheme;
         if (rarely()) {
@@ -1,102 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.discovery;
-
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.ESIntegTestCase.Scope;
-import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
-import org.junit.Before;
-
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import static org.hamcrest.Matchers.equalTo;
-
-@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
-public class ZenUnicastDiscoveryIT extends ESIntegTestCase {
-
-    private ClusterDiscoveryConfiguration discoveryConfig;
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return discoveryConfig.nodeSettings(nodeOrdinal);
-    }
-
-    @Before
-    public void clearConfig() {
-        discoveryConfig = null;
-    }
-
-    public void testNormalClusterForming() throws ExecutionException, InterruptedException {
-        int currentNumNodes = randomIntBetween(3, 5);
-
-        // use explicit unicast hosts so we can start those first
-        int[] unicastHostOrdinals = new int[randomIntBetween(1, currentNumNodes)];
-        for (int i = 0; i < unicastHostOrdinals.length; i++) {
-            unicastHostOrdinals[i] = i;
-        }
-        discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, unicastHostOrdinals);
-
-        // start the unicast hosts
-        internalCluster().startNodesAsync(unicastHostOrdinals.length).get();
-
-        // start the rest of the cluster
-        internalCluster().startNodesAsync(currentNumNodes - unicastHostOrdinals.length).get();
-
-        if (client().admin().cluster().prepareHealth().setWaitForNodes("" + currentNumNodes).get().isTimedOut()) {
-            logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState());
-            fail("timed out waiting for cluster to form with [" + currentNumNodes + "] nodes");
-        }
-    }
-
-    // Without the 'include temporalResponses responses to nodesToConnect' improvement in UnicastZenPing#sendPings this
-    // test fails, because 2 nodes elect themselves as master and the health request times out b/c waiting_for_nodes=N
-    // can't be satisfied.
-    public void testMinimumMasterNodes() throws Exception {
-        int currentNumNodes = randomIntBetween(3, 5);
-        final int min_master_nodes = currentNumNodes / 2 + 1;
-        int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes);
-        final Settings settings = Settings.builder()
-            .put("discovery.zen.join_timeout", TimeValue.timeValueSeconds(10))
-            .put("discovery.zen.minimum_master_nodes", min_master_nodes)
-            .build();
-        discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings);
-
-        List<String> nodes = internalCluster().startNodesAsync(currentNumNodes).get();
-
-        ensureStableCluster(currentNumNodes);
-
-        DiscoveryNode masterDiscoNode = null;
-        for (String node : nodes) {
-            ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
-            assertThat(state.nodes().getSize(), equalTo(currentNumNodes));
-            if (masterDiscoNode == null) {
-                masterDiscoNode = state.nodes().getMasterNode();
-            } else {
-                assertThat(masterDiscoNode.equals(state.nodes().getMasterNode()), equalTo(true));
-            }
-        }
-    }
-}
@@ -80,12 +80,12 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
             .put(Node.NODE_DATA_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, masterNodeSettings).get();
+        internalCluster().startNodes(2, masterNodeSettings);
         Settings dateNodeSettings = Settings.builder()
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, dateNodeSettings).get();
+        internalCluster().startNodes(2, dateNodeSettings);
         ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth()
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("4")

@@ -100,13 +100,10 @@ public class ZenDiscoveryIT extends ESIntegTestCase {

         final String oldMaster = internalCluster().getMasterName();
         internalCluster().stopCurrentMasterNode();
-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
+        assertBusy(() -> {
             String current = internalCluster().getMasterName();
             assertThat(current, notNullValue());
             assertThat(current, not(equalTo(oldMaster)));
-            }
         });
         ensureSearchable("test");

@@ -130,7 +127,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, dateNodeSettings).get();
+        internalCluster().startNodes(2, dateNodeSettings);
         client().admin().cluster().prepareHealth().setWaitForNodes("3").get();

         ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);

@@ -155,8 +152,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
     }

     public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception {
-        List<String> nodeNames = internalCluster().startNodesAsync(2).get();
-        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
+        List<String> nodeNames = internalCluster().startNodes(2);

         List<String> nonMasterNodes = new ArrayList<>(nodeNames);
         nonMasterNodes.remove(internalCluster().getMasterName());
@@ -20,10 +20,12 @@
 package org.elasticsearch.fieldstats;

 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.action.fieldstats.FieldStatsResponse;
 import org.elasticsearch.action.fieldstats.IndexConstraint;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.DateFieldMapper;

@@ -46,7 +48,6 @@ import static org.elasticsearch.action.fieldstats.IndexConstraint.Comparison.LTE
 import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MAX;
 import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MIN;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;

 public class FieldStatsTests extends ESSingleNodeTestCase {
     public void testByte() {
@@ -73,83 +74,157 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         testNumberRange("field1", "long", -312321312312422L, -312321312312412L);
     }

+    private static String makeType(String type, boolean indexed, boolean docValues, boolean stored) {
+        return new StringBuilder()
+            .append("type=").append(type)
+            .append(",index=").append(indexed)
+            .append(",doc_values=").append(docValues)
+            .append(",store=").append(stored).toString();
+    }
+
     public void testString() {
-        createIndex("test", Settings.EMPTY, "test", "field", "type=text");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("keyword", true, false, false),
+            "field_dv", makeType("keyword", false, true, false),
+            "field_stored", makeType("keyword", false, true, true),
+            "field_source", makeType("keyword", false, false, false));
         for (int value = 0; value <= 10; value++) {
-            client().prepareIndex("test", "test").setSource("field",
-                String.format(Locale.ENGLISH, "%03d", value)).get();
+            String keyword = String.format(Locale.ENGLISH, "%03d", value);
+            client().prepareIndex("test", "test")
+                .setSource("field_index", keyword,
+                    "field_dv", keyword,
+                    "field_stored", keyword,
+                    "field_source", keyword).get();
         }
         client().admin().indices().prepareRefresh().get();

-        FieldStatsResponse result = client().prepareFieldStats().setFields("field").get();
-        assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get("field").getMinValue(),
-            equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 0))));
-        assertThat(result.getAllFieldStats().get("field").getMaxValue(),
-            equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 10))));
-        assertThat(result.getAllFieldStats().get("field").getMinValueAsString(),
-            equalTo(String.format(Locale.ENGLISH, "%03d", 0)));
-        assertThat(result.getAllFieldStats().get("field").getMaxValueAsString(),
-            equalTo(String.format(Locale.ENGLISH, "%03d", 10)));
-        assertThat(result.getAllFieldStats().get("field").getDisplayType(),
-            equalTo("string"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        assertEquals(result.getAllFieldStats().size(), 3);
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(),
+                "string");
+            if ("field_index".equals(field)) {
+                assertEquals(stats.getMinValue(),
+                    new BytesRef(String.format(Locale.ENGLISH, "%03d", 0)));
+                assertEquals(stats.getMaxValue(),
+                    new BytesRef(String.format(Locale.ENGLISH, "%03d", 10)));
+                assertEquals(stats.getMinValueAsString(),
+                    String.format(Locale.ENGLISH, "%03d", 0));
+                assertEquals(stats.getMaxValueAsString(),
+                    String.format(Locale.ENGLISH, "%03d", 10));
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }

     public void testDouble() {
-        String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=double");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("double", true, false, false),
+            "field_dv", makeType("double", false, true, false),
+            "field_stored", makeType("double", false, true, true),
+            "field_source", makeType("double", false, false, false));
         for (double value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
         }
         client().admin().indices().prepareRefresh().get();

-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Double.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if ("field_index".equals(field)) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Double.toString(-1));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }

     public void testHalfFloat() {
-        String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=half_float");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("half_float", true, false, false),
+            "field_dv", makeType("half_float", false, true, false),
+            "field_stored", makeType("half_float", false, true, true),
+            "field_source", makeType("half_float", false, false, false));
         for (float value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
         }
         client().admin().indices().prepareRefresh().get();

-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Float.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(), equalTo(Float.toString(9)));
-        assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if (field.equals("field_index")) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Float.toString(-1));
+                assertEquals(stats.getMaxValueAsString(), Float.toString(9));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }

     public void testFloat() {
         String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=float");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("float", true, false, false),
+            "field_dv", makeType("float", false, true, false),
+            "field_stored", makeType("float", false, true, true),
+            "field_source", makeType("float", false, false, false));
         for (float value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
         }
         client().admin().indices().prepareRefresh().get();

-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Float.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(), equalTo(Float.toString(9)));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[]{"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if (field.equals("field_index")) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Float.toString(-1));
+                assertEquals(stats.getMaxValueAsString(), Float.toString(9));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }

     private void testNumberRange(String fieldName, String fieldType, long min, long max) {
@@ -166,21 +241,21 @@ public class FieldStatsTests extends ESSingleNodeTestCase {

         FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
         long numDocs = max - min + 1;
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(numDocs));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(numDocs));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(min));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(max));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(),
-            equalTo(java.lang.Long.toString(min)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(),
-            equalTo(java.lang.Long.toString(max)));
-        assertThat(result.getAllFieldStats().get(fieldName).isSearchable(), equalTo(true));
-        assertThat(result.getAllFieldStats().get(fieldName).isAggregatable(), equalTo(true));
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxDoc(), numDocs);
+        assertEquals(result.getAllFieldStats().get(fieldName).getDocCount(), numDocs);
+        assertEquals(result.getAllFieldStats().get(fieldName).getDensity(), 100);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMinValue(), min);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxValue(), max);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMinValueAsString(),
+            java.lang.Long.toString(min));
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxValueAsString(),
+            java.lang.Long.toString(max));
+        assertEquals(result.getAllFieldStats().get(fieldName).isSearchable(), true);
+        assertEquals(result.getAllFieldStats().get(fieldName).isAggregatable(), true);
         if (fieldType.equals("float") || fieldType.equals("double") || fieldType.equals("half-float")) {
-            assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+            assertEquals(result.getAllFieldStats().get(fieldName).getDisplayType(), "float");
         } else {
-            assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("integer"));
+            assertEquals(result.getAllFieldStats().get(fieldName).getDisplayType(), "integer");
         }

         client().admin().indices().prepareDelete("test").get();
@@ -193,18 +268,19 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
+        stats.add(new FieldStats.Long(0, 0, 0, 0, false, false));

         FieldStats stat = new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L);
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(4L));
-        assertThat(stat.getSumDocFreq(), equalTo(4L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(4L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(false));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), 4L);
+        assertEquals(stat.getSumDocFreq(), 4L);
+        assertEquals(stat.getSumTotalTermFreq(), 4L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), false);
+        assertEquals(stat.getDisplayType(), "integer");
     }

     public void testMerge_notAvailable() {
@@ -217,26 +293,28 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(-1L));
-        assertThat(stat.getSumDocFreq(), equalTo(-1L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(true));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), -1L);
+        assertEquals(stat.getSumDocFreq(), -1L);
+        assertEquals(stat.getSumTotalTermFreq(), -1L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), true);
+        assertEquals(stat.getDisplayType(), "integer");

-        stats.add(new FieldStats.Long(1, -1L, -1L, -1L, true, true, 1L, 1L));
+        stats.add(new FieldStats.Long(1, -1L, -1L, -1L, false, true));
         stat = stats.remove(0);
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(-1L));
-        assertThat(stat.getSumDocFreq(), equalTo(-1L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(true));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), -1L);
+        assertEquals(stat.getSumDocFreq(), -1L);
+        assertEquals(stat.getSumTotalTermFreq(), -1L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), true);
+        assertEquals(stat.getDisplayType(), "integer");
+        assertNull(stat.getMaxValue());
+        assertNull(stat.getMinValue());
     }

     public void testNumberFiltering() {
@@ -250,9 +328,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             .setFields("value")
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -260,7 +338,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "0"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -268,7 +346,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LT, "1"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -276,8 +354,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "1"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -285,8 +363,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "2"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -294,7 +372,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "2"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -302,8 +380,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -311,8 +389,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "4"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -320,7 +398,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "4"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -328,9 +406,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LTE, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);

         response = client().prepareFieldStats()
             .setFields("value")

@@ -338,7 +416,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             new IndexConstraint("value", MAX, LT, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
     }

     public void testDateFiltering() {
@ -347,8 +425,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
DateTime dateTime2 = new DateTime(2014, 1, 2, 0, 0, 0, 0, DateTimeZone.UTC);
|
||||
String dateTime2Str = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().print(dateTime2);
|
||||
|
||||
createIndex("test1", Settings.EMPTY, "type", "value", "type=date");
|
||||
client().prepareIndex("test1", "test").setSource("value", dateTime1Str).get();
|
||||
createIndex("test1", Settings.EMPTY, "type", "value", "type=date", "value2", "type=date,index=false");
|
||||
client().prepareIndex("test1", "test")
|
||||
.setSource("value", dateTime1Str, "value2", dateTime1Str).get();
|
||||
createIndex("test2", Settings.EMPTY, "type", "value", "type=date");
|
||||
client().prepareIndex("test2", "test").setSource("value", dateTime2Str).get();
|
||||
client().admin().indices().prepareRefresh().get();
|
||||
|
@ -357,17 +436,17 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
.setFields("value")
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
equalTo(dateTime1.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
equalTo(dateTime2.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime1Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime2Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 2);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
dateTime1.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
dateTime2.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
dateTime1Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
dateTime2Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
"date");
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
|
@ -375,7 +454,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, "2013-12-31T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 0);
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
|
@ -383,13 +462,13 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, "2014-01-01T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
equalTo(dateTime1.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime1Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 1);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
dateTime1.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
dateTime1Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getDisplayType(),
|
||||
"date");
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
|
@ -397,11 +476,11 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, "2014-01-02T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
equalTo(dateTime2.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime2Str));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 1);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
dateTime2.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
dateTime2Str);
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
|
@ -409,7 +488,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, "2014-01-03T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 0);
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
|
@ -417,47 +496,53 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, "2014-01-02T01:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
equalTo(dateTime2.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime2Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 1);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
dateTime2.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
dateTime2Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
"date");
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
.setIndexContraints(new IndexConstraint("value", MIN, GTE, "2014-01-01T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
equalTo(dateTime1.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
equalTo(dateTime2.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime1Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime2Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 2);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
dateTime1.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
dateTime2.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
dateTime1Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
dateTime2Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
"date");
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value")
|
||||
.setIndexContraints(new IndexConstraint("value", MAX, LTE, "2014-01-02T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
equalTo(dateTime1.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
equalTo(dateTime2.getMillis()));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime1Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo(dateTime2Str));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 2);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
|
||||
dateTime1.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
|
||||
dateTime2.getMillis());
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
|
||||
dateTime1Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
dateTime2Str);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(), "date");
|
||||
|
||||
response = client().prepareFieldStats()
|
||||
.setFields("value2")
|
||||
.setIndexContraints(new IndexConstraint("value2", MAX, LTE, "2014-01-02T00:00:00.000Z"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 0);
|
||||
}
|
||||
|
||||
public void testDateFiltering_optionalFormat() {
|
||||
|
@ -476,11 +561,11 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
new IndexConstraint("value", MAX, LTE, String.valueOf(dateTime2.getMillis()), "epoch_millis"))
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
equalTo("2014-01-02T00:00:00.000Z"));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
equalTo("date"));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 1);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
|
||||
"2014-01-02T00:00:00.000Z");
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
|
||||
"date");
|
||||
|
||||
try {
|
||||
client().prepareFieldStats()
|
||||
|
@ -501,8 +586,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
.setFields("*")
|
||||
.setLevel("indices")
|
||||
.get();
|
||||
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getIndicesMergedFieldStats().get("test1").size(), equalTo(0));
|
||||
assertEquals(response.getIndicesMergedFieldStats().size(), 1);
|
||||
assertEquals(response.getIndicesMergedFieldStats().get("test1").size(), 0);
|
||||
}
|
||||
|
||||
public void testMetaFieldsNotIndexed() {
|
||||
|
@ -513,56 +598,91 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
|
|||
FieldStatsResponse response = client().prepareFieldStats()
|
||||
.setFields("_id", "_type")
|
||||
.get();
|
||||
assertThat(response.getAllFieldStats().size(), equalTo(1));
|
||||
assertThat(response.getAllFieldStats().get("_type").isSearchable(), equalTo(true));
|
||||
assertThat(response.getAllFieldStats().get("_type").isAggregatable(), equalTo(true));
|
||||
assertEquals(response.getAllFieldStats().size(), 1);
|
||||
assertEquals(response.getAllFieldStats().get("_type").isSearchable(), true);
|
||||
assertEquals(response.getAllFieldStats().get("_type").isAggregatable(), true);
|
||||
}
|
||||
|
||||
public void testSerialization() throws IOException {
|
||||
for (Version version : new Version[] {Version.CURRENT, Version.V_5_0_1}){
|
||||
for (int i = 0; i < 20; i++) {
|
||||
assertSerialization(randomFieldStats());
|
||||
assertSerialization(randomFieldStats(version.onOrAfter(Version.V_5_2_0_UNRELEASED)), version);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* creates a random field stats which does not guarantee that {@link FieldStats#maxValue} is greater than {@link FieldStats#minValue}
|
||||
**/
|
||||
private FieldStats randomFieldStats() throws UnknownHostException {
|
||||
private FieldStats randomFieldStats(boolean withNullMinMax) throws UnknownHostException {
|
||||
int type = randomInt(5);
|
||||
switch (type) {
|
||||
case 0:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong());
|
||||
}
|
||||
case 1:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble());
|
||||
}
|
||||
case 2:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"),
|
||||
new Date().getTime(), new Date().getTime());
|
||||
}
|
||||
case 3:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(),
|
||||
new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20)));
|
||||
}
|
||||
case 4:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(),
|
||||
InetAddress.getByName("::1"), InetAddress.getByName("::1"));
|
||||
}
|
||||
case 5:
|
||||
if (withNullMinMax && randomBoolean()) {
|
||||
return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean());
|
||||
} else {
|
||||
return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
|
||||
randomPositiveLong(), randomBoolean(), randomBoolean(),
|
||||
InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4"));
|
||||
}
|
||||
default:
|
||||
throw new IllegalArgumentException("Invalid type");
|
||||
}
|
||||
}
|
||||
|
||||
private void assertSerialization(FieldStats stats) throws IOException {
|
||||
private void assertSerialization(FieldStats stats, Version version) throws IOException {
|
||||
BytesStreamOutput output = new BytesStreamOutput();
|
||||
output.setVersion(version);
|
||||
stats.writeTo(output);
|
||||
output.flush();
|
||||
FieldStats deserializedStats = FieldStats.readFrom(output.bytes().streamInput());
|
||||
assertThat(stats, equalTo(deserializedStats));
|
||||
assertThat(stats.hashCode(), equalTo(deserializedStats.hashCode()));
|
||||
StreamInput input = output.bytes().streamInput();
|
||||
input.setVersion(version);
|
||||
FieldStats deserializedStats = FieldStats.readFrom(input);
|
||||
assertEquals(stats, deserializedStats);
|
||||
assertEquals(stats.hashCode(), deserializedStats.hashCode());
|
||||
}
|
||||
}
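The point of the assertSerialization rewrite above is easy to miss in the gutter noise: the wire version must now be pinned on both ends of the round trip, not just on the output. A minimal restatement of that pattern, using only the types already shown in this test (`stats` and `version` as in the method above):

    BytesStreamOutput output = new BytesStreamOutput();
    output.setVersion(version);   // serialize in the target wire format
    stats.writeTo(output);
    StreamInput input = output.bytes().streamInput();
    input.setVersion(version);    // deserialize with the same version; otherwise
                                  // version-gated fields (e.g. the optional min/max)
                                  // would be written but never read, or vice versa
    FieldStats deserializedStats = FieldStats.readFrom(input);
    assertEquals(stats, deserializedStats);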

@@ -94,7 +94,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {

public void testSimpleOpenClose() throws Exception {
logger.info("--> starting 2 nodes");
internalCluster().startNodesAsync(2).get();
internalCluster().startNodes(2);

logger.info("--> creating test index");
createIndex("test");
@@ -237,7 +237,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
logger.info("--> cleaning nodes");

logger.info("--> starting 2 nodes");
internalCluster().startNodesAsync(2).get();
internalCluster().startNodes(2);

logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
@@ -277,7 +277,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
public void testDanglingIndices() throws Exception {
logger.info("--> starting two nodes");

final String node_1 = internalCluster().startNodesAsync(2).get().get(0);
final String node_1 = internalCluster().startNodes(2).get(0);

logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
@@ -331,7 +331,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
if (randomBoolean()) {
// test with a regular index
logger.info("--> starting a cluster with " + numNodes + " nodes");
nodes = internalCluster().startNodesAsync(numNodes).get();
nodes = internalCluster().startNodes(numNodes);
logger.info("--> create an index");
createIndex(indexName);
} else {
@@ -344,7 +344,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString())
.put("index.store.fs.fs_lock", randomFrom("native", "simple"))
.build();
nodes = internalCluster().startNodesAsync(numNodes, nodeSettings).get();
nodes = internalCluster().startNodes(numNodes, nodeSettings);
logger.info("--> create a shadow replica index");
createShadowReplicaIndex(indexName, dataPath, numNodes - 1);
}

@@ -36,6 +36,7 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.List;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -57,10 +58,9 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception {
// this test checks that the index state is removed from a data only node once all shards have been allocated away from it
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
InternalTestCluster.Async<String> nodeName1 = internalCluster().startDataOnlyNodeAsync();
InternalTestCluster.Async<String> nodeName2 = internalCluster().startDataOnlyNodeAsync();
String node1 = nodeName1.get();
String node2 = nodeName2.get();
List<String> nodeNames= internalCluster().startDataOnlyNodes(2);
String node1 = nodeNames.get(0);
String node2 = nodeNames.get(1);

String index = "index";
assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1)));

@@ -46,8 +46,7 @@ public class QuorumGatewayIT extends ESIntegTestCase {
public void testQuorumRecovery() throws Exception {
logger.info("--> starting 3 nodes");
// we are shutting down nodes - make sure we don't have 2 clusters if we test network
internalCluster().startNodesAsync(3).get();

internalCluster().startNodes(3);

createIndex("test");
ensureGreen();

@@ -316,7 +316,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {

public void testLatestVersionLoaded() throws Exception {
// clean two nodes
internalCluster().startNodesAsync(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()).get();
internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());

client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
client().admin().indices().prepareFlush().execute().actionGet();
@@ -366,7 +366,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {

logger.info("--> starting the two nodes back");

internalCluster().startNodesAsync(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()).get();
internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());

logger.info("--> running cluster_health (wait for the shards to startup)");
ensureGreen();
@@ -392,7 +392,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 4)
.put(MockFSDirectoryService.CRASH_INDEX_SETTING.getKey(), false).build();

internalCluster().startNodesAsync(4, settings).get();
internalCluster().startNodes(4, settings);
// prevent any rebalance actions during the peer recovery
// if we run into a relocation the reuse count will be 0 and this fails the test. We are testing here if
// we reuse the files on disk after full restarts for replicas.

@@ -110,7 +110,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {

public void testCannotCreateWithBadPath() throws Exception {
Settings nodeSettings = nodeSettings("/badpath");
internalCluster().startNodesAsync(1, nodeSettings).get();
internalCluster().startNodes(1, nodeSettings);
Settings idxSettings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_DATA_PATH, "/etc/foo")
@@ -132,7 +132,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
final Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);

internalCluster().startNodesAsync(3, nodeSettings).get();
internalCluster().startNodes(3, nodeSettings);
Settings idxSettings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
@@ -189,7 +189,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
final Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);

internalCluster().startNodesAsync(3, nodeSettings).get();
internalCluster().startNodes(3, nodeSettings);
final String IDX = "test";

Settings idxSettings = Settings.builder()
@@ -552,7 +552,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {

final int nodeCount = randomIntBetween(2, 5);
logger.info("--> starting {} nodes", nodeCount);
final List<String> nodes = internalCluster().startNodesAsync(nodeCount, nodeSettings).get();
final List<String> nodes = internalCluster().startNodes(nodeCount, nodeSettings);
final String IDX = "test";
final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount);
final int numPrimaries = numPrimariesAndReplicas.v1();
@@ -605,7 +605,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);

final List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get();
final List<String> nodes = internalCluster().startNodes(2, nodeSettings);
String IDX = "test";

Settings idxSettings = Settings.builder()
@@ -661,7 +661,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);

internalCluster().startNodesAsync(3, nodeSettings).get();
internalCluster().startNodes(3, nodeSettings);
String IDX = "test";

Settings idxSettings = Settings.builder()
@@ -731,10 +731,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
Settings fooSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "foo").build();
Settings barSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "bar").build();

final InternalTestCluster.Async<List<String>> fooNodes = internalCluster().startNodesAsync(2, fooSettings);
final InternalTestCluster.Async<List<String>> barNodes = internalCluster().startNodesAsync(2, barSettings);
fooNodes.get();
barNodes.get();
List<String> allNodes = internalCluster().startNodes(fooSettings, fooSettings, barSettings, barSettings);
List<String> fooNodes = allNodes.subList(0, 2);
List<String> barNodes = allNodes.subList(2, 4);
String IDX = "test";

Settings includeFoo = Settings.builder()
@@ -768,27 +767,27 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get();

// wait for the shards to move from "foo" nodes to "bar" nodes
assertNoShardsOn(fooNodes.get());
assertNoShardsOn(fooNodes);

// put shards back on "foo"
client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get();

// wait for the shards to move from "bar" nodes to "foo" nodes
assertNoShardsOn(barNodes.get());
assertNoShardsOn(barNodes);

// Stop a foo node
logger.info("--> stopping first 'foo' node");
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(0)));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(0)));

// Ensure that the other foo node has all the shards now
assertShardCountOn(fooNodes.get().get(1), 5);
assertShardCountOn(fooNodes.get(1), 5);

// Assert no shards on the "bar" nodes
assertNoShardsOn(barNodes.get());
assertNoShardsOn(barNodes);

// Stop the second "foo" node
logger.info("--> stopping second 'foo' node");
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(1)));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(1)));

// The index should still be able to be allocated (on the "bar" nodes),
// all the "foo" nodes are gone
@@ -799,7 +798,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
String newFooNode = internalCluster().startNode(fooSettings);

assertShardCountOn(newFooNode, 5);
assertNoShardsOn(barNodes.get());
assertNoShardsOn(barNodes);
}

public void testDeletingClosedIndexRemovesFiles() throws Exception {
@@ -808,7 +807,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {

final int numNodes = randomIntBetween(2, 5);
logger.info("--> starting {} nodes", numNodes);
final List<String> nodes = internalCluster().startNodesAsync(numNodes, nodeSettings).get();
final List<String> nodes = internalCluster().startNodes(numNodes, nodeSettings);
final String IDX = "test";
final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(numNodes);
final int numPrimaries = numPrimariesAndReplicas.v1();
@@ -851,7 +850,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
Path dataPath = createTempDir();
Settings nodeSettings = nodeSettings(dataPath);

internalCluster().startNodesAsync(2, nodeSettings).get();
internalCluster().startNodes(2, nodeSettings);
String IDX = "test";

Settings idxSettings = Settings.builder()
@@ -868,7 +867,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
flushAndRefresh(IDX);

internalCluster().startNodesAsync(1).get();
internalCluster().startNodes(1);
ensureYellow(IDX);

final ClusterHealthResponse clusterHealth = client().admin().cluster()

@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
import org.elasticsearch.index.mapper.TextFieldMapper;
@@ -86,7 +87,8 @@ public class FieldDataCacheTests extends ESTestCase {
}

private SortedSetDVOrdinalsIndexFieldData createSortedDV(String fieldName, IndexFieldDataCache indexFieldDataCache) {
return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService());
return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService(),
AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);
}

private PagedBytesIndexFieldData createPagedBytes(String fieldName, IndexFieldDataCache indexFieldDataCache) {

@@ -72,7 +72,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
}

public void testCorruptTranslogFiles() throws Exception {
internalCluster().startNodesAsync(1, Settings.EMPTY).get();
internalCluster().startNodes(1, Settings.EMPTY);

assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put("index.number_of_shards", 1)

@@ -28,7 +28,6 @@ import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
@@ -47,7 +46,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.translog.TruncateTranslogCommand;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -85,7 +83,7 @@ public class TruncateTranslogIT extends ESIntegTestCase {
}

public void testCorruptTranslogTruncation() throws Exception {
internalCluster().startNodesAsync(1, Settings.EMPTY).get();
internalCluster().startNodes(1, Settings.EMPTY);

assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put("index.number_of_shards", 1)

@@ -34,7 +34,7 @@ public class DedicatedMasterGetFieldMappingIT extends SimpleGetFieldMappingsIT {
Settings settings = Settings.builder()
.put(Node.NODE_DATA_SETTING.getKey(), false)
.build();
internalCluster().startNodesAsync(settings, Settings.EMPTY).get();
internalCluster().startNodes(settings, Settings.EMPTY);
}

}

@@ -43,7 +43,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
@@ -174,7 +173,7 @@ public class RareClusterStateIT extends ESIntegTestCase {

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932")
public void testDeleteCreateInOneBulk() throws Exception {
internalCluster().startNodesAsync(2).get();
internalCluster().startNodes(2);
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get();
ensureGreen("test");
@@ -213,7 +212,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
.build();
final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
final List<String> nodeNames = internalCluster().startNodes(2, settings);
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());

final String master = internalCluster().getMasterName();
@@ -328,11 +327,11 @@ public class RareClusterStateIT extends ESIntegTestCase {
// Here we want to test that everything goes well if the mappings that
// are needed for a document are not available on the replica at the
// time of indexing it
final List<String> nodeNames = internalCluster().startNodesAsync(2,
final List<String> nodeNames = internalCluster().startNodes(2,
Settings.builder()
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
.build()).get();
.build());
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());

final String master = internalCluster().getMasterName();

@@ -292,17 +292,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
}

public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
InternalTestCluster.Async<String> masterFuture = internalCluster().startNodeAsync(
Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), true, Node.NODE_DATA_SETTING.getKey(), false).build());
InternalTestCluster.Async<List<String>> nodesFutures = internalCluster().startNodesAsync(4,
Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true).build());
final String masterNode = internalCluster().startMasterOnlyNode();
final List<String> nodes = internalCluster().startDataOnlyNodes(4);

final String masterNode = masterFuture.get();
final String node1 = nodesFutures.get().get(0);
final String node2 = nodesFutures.get().get(1);
final String node3 = nodesFutures.get().get(2);
final String node1 = nodes.get(0);
final String node2 = nodes.get(1);
final String node3 = nodes.get(2);
// we will use this later on, handy to start now to make sure it has a different data folder than nodes 1, 2 & 3
final String node4 = nodesFutures.get().get(3);
final String node4 = nodes.get(3);

assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put(indexSettings())
@@ -356,8 +353,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {

logger.debug("--> starting the two old nodes back");

internalCluster().startNodesAsync(2,
Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true).build());
internalCluster().startDataOnlyNodes(2);

assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());

@@ -372,7 +368,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
}

public void testShardActiveElseWhere() throws Exception {
List<String> nodes = internalCluster().startNodesAsync(2).get();
List<String> nodes = internalCluster().startNodes(2);

final String masterNode = internalCluster().getMasterName();
final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);

@@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue;
public class SimpleNodesInfoIT extends ESIntegTestCase {

public void testNodesInfos() throws Exception {
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
List<String> nodesIds = internalCluster().startNodes(2);
final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);

@@ -79,7 +79,7 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
}

public void testNodesInfosTotalIndexingBuffer() throws Exception {
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
List<String> nodesIds = internalCluster().startNodes(2);
final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);

@@ -113,11 +113,10 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
}

public void testAllocatedProcessors() throws Exception {
List<String> nodesIds = internalCluster().
startNodesAsync(
List<String> nodesIds = internalCluster().startNodes(
Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 3).build(),
Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 6).build()
).get();
);

final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);

@@ -126,7 +126,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
public void testNoRebalanceOnRollingRestart() throws Exception {
// see https://github.com/elastic/elasticsearch/issues/14387
internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodesAsync(3).get();
internalCluster().startDataOnlyNodes(3);
/**
* We start 3 nodes and a dedicated master. Restart one of the data-nodes and ensure that we get no relocations.
* Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject

@@ -470,7 +470,7 @@ public class RelocationIT extends ESIntegTestCase {
Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes),
Stream.generate(() -> Settings.builder().put("node.attr.color", "red").build()).limit(halfNodes)
).toArray(Settings[]::new);
List<String> nodes = internalCluster().startNodesAsync(nodeSettings).get();
List<String> nodes = internalCluster().startNodes(nodeSettings);
String[] blueNodes = nodes.subList(0, halfNodes).stream().toArray(String[]::new);
String[] redNodes = nodes.subList(halfNodes, nodes.size()).stream().toArray(String[]::new);
logger.info("blue nodes: {}", (Object)blueNodes);

@@ -22,11 +22,46 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;

public class IpTermsIT extends AbstractTermsTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(CustomScriptPlugin.class);
}

public static class CustomScriptPlugin extends AggregationTestScriptsPlugin {

@Override
protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
Map<String, Function<Map<String, Object>, Object>> scripts = super.pluginScripts();

scripts.put("doc['ip'].value", vars -> {
Map<?, ?> doc = (Map<?,?>) vars.get("doc");
return doc.get("ip");
});

scripts.put("doc['ip'].values", vars -> {
Map<?, ?> doc = (Map<?,?>) vars.get("doc");
return ((ScriptDocValues<?>) doc.get("ip")).get(0);
});

return scripts;
}
}

public void testBasics() throws Exception {
assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
indexRandom(true,
@@ -51,4 +86,55 @@ public class IpTermsIT extends AbstractTermsTestCase {
assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
}

public void testScriptValue() throws Exception {
assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
indexRandom(true,
client().prepareIndex("index", "type", "1").setSource("ip", "192.168.1.7"),
client().prepareIndex("index", "type", "2").setSource("ip", "192.168.1.7"),
client().prepareIndex("index", "type", "3").setSource("ip", "2001:db8::2:1"));

Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"doc['ip'].value", Collections.emptyMap());
SearchResponse response = client().prepareSearch("index").addAggregation(
AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get();
assertSearchResponse(response);
Terms terms = response.getAggregations().get("my_terms");
assertEquals(2, terms.getBuckets().size());

Terms.Bucket bucket1 = terms.getBuckets().get(0);
assertEquals(2, bucket1.getDocCount());
assertEquals("192.168.1.7", bucket1.getKey());
assertEquals("192.168.1.7", bucket1.getKeyAsString());

Terms.Bucket bucket2 = terms.getBuckets().get(1);
assertEquals(1, bucket2.getDocCount());
assertEquals("2001:db8::2:1", bucket2.getKey());
assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
}

public void testScriptValues() throws Exception {
assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
indexRandom(true,
client().prepareIndex("index", "type", "1").setSource("ip", "192.168.1.7"),
client().prepareIndex("index", "type", "2").setSource("ip", "192.168.1.7"),
client().prepareIndex("index", "type", "3").setSource("ip", "2001:db8::2:1"));

Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"doc['ip'].values", Collections.emptyMap());
SearchResponse response = client().prepareSearch("index").addAggregation(
AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get();
assertSearchResponse(response);
Terms terms = response.getAggregations().get("my_terms");
assertEquals(2, terms.getBuckets().size());

Terms.Bucket bucket1 = terms.getBuckets().get(0);
assertEquals(2, bucket1.getDocCount());
assertEquals("192.168.1.7", bucket1.getKey());
assertEquals("192.168.1.7", bucket1.getKeyAsString());

Terms.Bucket bucket2 = terms.getBuckets().get(1);
assertEquals(1, bucket2.getDocCount());
assertEquals("2001:db8::2:1", bucket2.getKey());
assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
}
}

@@ -766,6 +766,9 @@ public class SearchFieldsIT extends ESIntegTestCase {
.startObject("binary_field")
.field("type", "binary")
.endObject()
.startObject("ip_field")
.field("type", "ip")
.endObject()
.endObject()
.endObject()
.endObject()
@@ -784,6 +787,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
.field("double_field", 6.0d)
.field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
.field("boolean_field", true)
.field("ip_field", "::1")
.endObject()).execute().actionGet();

client().admin().indices().prepareRefresh().execute().actionGet();
@@ -798,14 +802,16 @@ public class SearchFieldsIT extends ESIntegTestCase {
.addDocValueField("float_field")
.addDocValueField("double_field")
.addDocValueField("date_field")
.addDocValueField("boolean_field");
.addDocValueField("boolean_field")
.addDocValueField("ip_field");
SearchResponse searchResponse = builder.execute().actionGet();

assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
assertThat(searchResponse.getHits().hits().length, equalTo(1));
Set<String> fields = new HashSet<>(searchResponse.getHits().getAt(0).fields().keySet());
assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field",
"float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field")));
"float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field",
"ip_field")));

assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
@@ -817,6 +823,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) true));
assertThat(searchResponse.getHits().getAt(0).fields().get("text_field").value(), equalTo("foo"));
assertThat(searchResponse.getHits().getAt(0).fields().get("keyword_field").value(), equalTo("foo"));
assertThat(searchResponse.getHits().getAt(0).fields().get("ip_field").value(), equalTo("::1"));
}

public void testScriptFields() throws Exception {

@@ -621,7 +621,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception {
Settings nodeSettings = Settings.builder().put().build();
logger.info("--> start two nodes");
internalCluster().startNodesAsync(2, nodeSettings).get();
internalCluster().startNodes(2, nodeSettings);
// Register mock repositories
client().admin().cluster().preparePutRepository("test-repo")
.setType("mock").setSettings(Settings.builder()

@@ -299,7 +299,7 @@ Response:

Field stats index constraints allow omitting all field stats for indices that don't match the constraint. An index
constraint can exclude indices' field stats based on the `min_value` and `max_value` statistics. This option is only
useful if the `level` option is set to `indices`.
useful if the `level` option is set to `indices`. Fields that are not indexed (not searchable) are always omitted when an index constraint is defined.

For example, index constraints can be useful for finding out the min and max value of a particular property of your data in
a time-based scenario. The following request only returns field stats for the `answer_count` property for indices
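The concrete request body is cut off by this hunk. As a hedged sketch, the same kind of constraint expressed through the Java test client used elsewhere in this commit (the `creation_date` field and the date values are illustrative, not taken from this diff; MIN, MAX, GTE and LT are the statically imported constraint comparators seen in the tests above):

    FieldStatsResponse response = client().prepareFieldStats()
        .setFields("answer_count")
        .setIndexContraints(
            new IndexConstraint("creation_date", MIN, GTE, "2014-09-01T00:00:00.000Z"),
            new IndexConstraint("creation_date", MAX, LT, "2014-10-01T00:00:00.000Z"))
        .setLevel("indices")   // index constraints only make sense at the indices level
        .get();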

@@ -48,14 +48,14 @@ public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler<Dele

@Override
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
if (false == request.hasContent()) {
throw new ElasticsearchException("_delete_by_query requires a request body");
}
return doPrepareRequest(request, client, false, false);
}

@Override
protected DeleteByQueryRequest buildRequest(RestRequest request) throws IOException {
if (false == request.hasContent()) {
throw new ElasticsearchException("_delete_by_query requires a request body");
}
/*
* Passing the search request through DeleteByQueryRequest first allows
* it to set its own defaults which differ from SearchRequest's

@@ -19,7 +19,6 @@

package org.elasticsearch.index.reindex;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
@@ -113,14 +112,18 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq

@Override
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
if (false == request.hasContent()) {
throw new ElasticsearchException("_reindex requires a request body");
}
return doPrepareRequest(request, client, true, true);
}

@Override
protected ReindexRequest buildRequest(RestRequest request) throws IOException {
if (false == request.hasContent()) {
throw new IllegalArgumentException("_reindex requires a request body");
}
if (request.hasParam("pipeline")) {
throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parmaeter. "
|
||||
+ "Specify it in the [dest] object instead.");
|
||||
}
|
||||
ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest());
|
||||
try (XContentParser xcontent = XContentFactory.xContent(request.content()).createParser(request.content())) {
|
||||
PARSER.parse(xcontent, internal, new ReindexParseContext(searchRequestParsers, parseFieldMatcher));
|
||||
|
|
|
@@ -23,20 +23,25 @@ import org.elasticsearch.action.index.IndexRequest;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.reindex.RestReindexAction.ReindexParseContext;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.mockito.Mockito.mock;

public class RestReindexActionTests extends ESTestCase {
    public void testBuildRemoteInfoNoRemote() throws IOException {
@@ -127,6 +132,31 @@ public class RestReindexActionTests extends ESTestCase {
        }
    }

    public void testPipelineQueryParameterIsError() throws IOException {
        SearchRequestParsers parsers = new SearchRequestParsers(new IndicesQueriesRegistry(), null, null, null);
        RestReindexAction action = new RestReindexAction(Settings.EMPTY, mock(RestController.class), parsers, null);

        FakeRestRequest.Builder request = new FakeRestRequest.Builder();
        try (XContentBuilder body = JsonXContent.contentBuilder().prettyPrint()) {
            body.startObject(); {
                body.startObject("source"); {
                    body.field("index", "source");
                }
                body.endObject();
                body.startObject("dest"); {
                    body.field("index", "dest");
                }
                body.endObject();
            }
            body.endObject();
            request.withContent(body.bytes());
        }
        request.withParams(singletonMap("pipeline", "doesn't matter"));
        Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build()));

        assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage());
    }

    private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException {
        Map<String, Object> remote = new HashMap<>();
        remote.put("host", hostInRest);
@@ -64,8 +64,6 @@ import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;

@ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0)
@SuppressForbidden(reason = "use http server")
// TODO this should be an IT but currently all ITs in this project run against a real cluster

@@ -269,7 +267,7 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase {
        // only wait for the cluster to form
        ensureClusterSizeConsistency();
        // add one more node and wait for it to join
        internalCluster().startDataOnlyNodeAsync().get();
        internalCluster().startDataOnlyNode();
        ensureClusterSizeConsistency();
    }
}
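The same substitution recurs in the EC2 and GCE tests below. Since the rendered diff no longer distinguishes removed from added lines, the contrast is, as a note:

    internalCluster().startDataOnlyNodeAsync().get(); // removed: obtain an Async<String> handle, then block on it
    internalCluster().startDataOnlyNode();            // added: the call itself blocks and returns the node name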
@@ -243,7 +243,7 @@ public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase {
        // only wait for the cluster to form
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
        // add one more node and wait for it to join
        internalCluster().startDataOnlyNodeAsync().get();
        internalCluster().startDataOnlyNode();
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
    }
}
@@ -40,7 +40,6 @@ import org.junit.BeforeClass;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

@@ -200,7 +199,7 @@ public class GceDiscoverTests extends ESIntegTestCase {
        // only wait for the cluster to form
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
        // add one more node and wait for it to join
        internalCluster().startDataOnlyNodeAsync().get();
        internalCluster().startDataOnlyNode();
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
    }
}
@@ -19,6 +19,12 @@

package org.elasticsearch;

import org.apache.lucene.analysis.en.PorterStemFilterFactory;
import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.index.analysis.ASCIIFoldingTokenFilterFactory;
import org.elasticsearch.index.analysis.ApostropheFilterFactory;

@@ -86,13 +92,19 @@ import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
import org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
import org.elasticsearch.test.ESTestCase;

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Alerts us if new analyzers are added to lucene, so we don't miss them.
@@ -102,6 +114,19 @@ import java.util.TreeSet;
 */
public class AnalysisFactoryTestCase extends ESTestCase {

    private static final Pattern UNDERSCORE_THEN_ANYTHING = Pattern.compile("_(.)");

    private static String toCamelCase(String s) {
        Matcher m = UNDERSCORE_THEN_ANYTHING.matcher(s);
        StringBuffer sb = new StringBuffer();
        while (m.find()) {
            m.appendReplacement(sb, m.group(1).toUpperCase());
        }
        m.appendTail(sb);
        sb.setCharAt(0, Character.toUpperCase(sb.charAt(0)));
        return sb.toString();
    }

    static final Map<String,Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String,Class<?>>()
        // exposed in ES
        .put("classic", ClassicTokenizerFactory.class)
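A quick worked example of toCamelCase above; if these assertions were placed inside AnalysisFactoryTestCase (they are not part of this change), they would pass:

    assertEquals("EdgeNgram", toCamelCase("edge_ngram")); // each "_x" becomes "X", then the first char is uppercased
    assertEquals("Keyword", toCamelCase("keyword"));      // no underscore: only the first char changes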
@@ -121,6 +146,26 @@ public class AnalysisFactoryTestCase extends ESTestCase {
        .put("wikipedia", Void.class)
        .immutableMap();

    static final Map<PreBuiltTokenizers, Class<?>> PREBUILT_TOKENIZERS;
    static {
        PREBUILT_TOKENIZERS = new HashMap<>();
        for (PreBuiltTokenizers tokenizer : PreBuiltTokenizers.values()) {
            Class<?> luceneFactoryClazz;
            switch (tokenizer) {
            case UAX_URL_EMAIL:
                luceneFactoryClazz = org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class;
                break;
            case PATH_HIERARCHY:
                luceneFactoryClazz = Void.class;
                break;
            default:
                luceneFactoryClazz = org.apache.lucene.analysis.util.TokenizerFactory.lookupClass(
                        toCamelCase(tokenizer.getTokenizerFactory(Version.CURRENT).name()));
            }
            PREBUILT_TOKENIZERS.put(tokenizer, luceneFactoryClazz);
        }
    }

    static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
        // exposed in ES
        .put("apostrophe", ApostropheFilterFactory.class)
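The hand-mapped cases in the switch above exist because camel-casing the Elasticsearch name does not always produce the Lucene factory's registered name. A sketch, not part of this change:

    toCamelCase("uax_url_email"); // yields "UaxUrlEmail", but the Lucene class is UAX29URLEmailTokenizerFactory,
                                  // so the switch maps UAX_URL_EMAIL to that class explicitly

Entries mapped to Void.class mark components with no Lucene factory to compare against; the assertions in testPreBuiltMultiTermAware below skip them.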
@@ -233,6 +278,41 @@ public class AnalysisFactoryTestCase extends ESTestCase {

        .immutableMap();

    static final Map<PreBuiltTokenFilters, Class<?>> PREBUILT_TOKENFILTERS;
    static {
        PREBUILT_TOKENFILTERS = new HashMap<>();
        for (PreBuiltTokenFilters tokenizer : PreBuiltTokenFilters.values()) {
            Class<?> luceneFactoryClazz;
            switch (tokenizer) {
            case REVERSE:
                luceneFactoryClazz = ReverseStringFilterFactory.class;
                break;
            case UNIQUE:
                luceneFactoryClazz = Void.class;
                break;
            case SNOWBALL:
            case DUTCH_STEM:
            case FRENCH_STEM:
            case RUSSIAN_STEM:
                luceneFactoryClazz = SnowballPorterFilterFactory.class;
                break;
            case STEMMER:
                luceneFactoryClazz = PorterStemFilterFactory.class;
                break;
            case DELIMITED_PAYLOAD_FILTER:
                luceneFactoryClazz = org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class;
                break;
            case LIMIT:
                luceneFactoryClazz = org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory.class;
                break;
            default:
                luceneFactoryClazz = org.apache.lucene.analysis.util.TokenFilterFactory.lookupClass(
                        toCamelCase(tokenizer.getTokenFilterFactory(Version.CURRENT).name()));
            }
            PREBUILT_TOKENFILTERS.put(tokenizer, luceneFactoryClazz);
        }
    }

    static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
        // exposed in ES
        .put("htmlstrip", HtmlStripCharFilterFactory.class)
@@ -244,6 +324,20 @@ public class AnalysisFactoryTestCase extends ESTestCase {
        .put("persian", Void.class)
        .immutableMap();

    static final Map<PreBuiltCharFilters, Class<?>> PREBUILT_CHARFILTERS;
    static {
        PREBUILT_CHARFILTERS = new HashMap<>();
        for (PreBuiltCharFilters tokenizer : PreBuiltCharFilters.values()) {
            Class<?> luceneFactoryClazz;
            switch (tokenizer) {
            default:
                luceneFactoryClazz = org.apache.lucene.analysis.util.CharFilterFactory.lookupClass(
                        toCamelCase(tokenizer.getCharFilterFactory(Version.CURRENT).name()));
            }
            PREBUILT_CHARFILTERS.put(tokenizer, luceneFactoryClazz);
        }
    }

    protected Map<String, Class<?>> getTokenizers() {
        return KNOWN_TOKENIZERS;
    }
@@ -325,4 +419,62 @@ public class AnalysisFactoryTestCase extends ESTestCase {
            classesThatShouldNotHaveMultiTermSupport.isEmpty());
    }

    public void testPreBuiltMultiTermAware() {
        Collection<Object> expected = new HashSet<>();
        Collection<Object> actual = new HashSet<>();

        for (Map.Entry<PreBuiltTokenizers, Class<?>> entry : PREBUILT_TOKENIZERS.entrySet()) {
            PreBuiltTokenizers tokenizer = entry.getKey();
            Class<?> luceneFactory = entry.getValue();
            if (luceneFactory == Void.class) {
                continue;
            }
            assertTrue(TokenizerFactory.class.isAssignableFrom(luceneFactory));
            if (tokenizer.getTokenizerFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
                actual.add(tokenizer);
            }
            if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
                expected.add(tokenizer);
            }
        }
        for (Map.Entry<PreBuiltTokenFilters, Class<?>> entry : PREBUILT_TOKENFILTERS.entrySet()) {
            PreBuiltTokenFilters tokenFilter = entry.getKey();
            Class<?> luceneFactory = entry.getValue();
            if (luceneFactory == Void.class) {
                continue;
            }
            assertTrue(TokenFilterFactory.class.isAssignableFrom(luceneFactory));
            if (tokenFilter.getTokenFilterFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
                actual.add(tokenFilter);
            }
            if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
                expected.add(tokenFilter);
            }
        }
        for (Map.Entry<PreBuiltCharFilters, Class<?>> entry : PREBUILT_CHARFILTERS.entrySet()) {
            PreBuiltCharFilters charFilter = entry.getKey();
            Class<?> luceneFactory = entry.getValue();
            if (luceneFactory == Void.class) {
                continue;
            }
            assertTrue(CharFilterFactory.class.isAssignableFrom(luceneFactory));
            if (charFilter.getCharFilterFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
                actual.add(charFilter);
            }
            if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
                expected.add(charFilter);
            }
        }

        Set<Object> classesMissingMultiTermSupport = new HashSet<>(expected);
        classesMissingMultiTermSupport.removeAll(actual);
        assertTrue("Pre-built components are missing multi-term support: " + classesMissingMultiTermSupport,
                classesMissingMultiTermSupport.isEmpty());

        Set<Object> classesThatShouldNotHaveMultiTermSupport = new HashSet<>(actual);
        classesThatShouldNotHaveMultiTermSupport.removeAll(expected);
        assertTrue("Pre-built components should not have multi-term support: " + classesThatShouldNotHaveMultiTermSupport,
                classesThatShouldNotHaveMultiTermSupport.isEmpty());
    }

}
@@ -130,7 +130,6 @@ import java.util.stream.Stream;

import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY;
import static org.apache.lucene.util.LuceneTestCase.rarely;
import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING;
import static org.elasticsearch.test.ESTestCase.assertBusy;
import static org.elasticsearch.test.ESTestCase.randomFrom;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -696,10 +695,6 @@ public final class InternalTestCluster extends TestCluster {
        ensureOpen(); // currently unused
        Builder builder = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false)
            .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false);
        if (size() == 0) {
            // if we are the first node - don't wait for a state
            builder.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
        }
        return startNode(builder);
    }

@@ -791,6 +786,10 @@ public final class InternalTestCluster extends TestCluster {
        return nodeAndClientId;
    }

    public String getName() {
        return name;
    }

    public boolean isMasterEligible() {
        return Node.NODE_MASTER_SETTING.get(node.settings());
    }
@@ -887,9 +886,6 @@ public final class InternalTestCluster extends TestCluster {
        assert ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(newSettings.build()) == false : "min master nodes is auto managed";
        newSettings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes).build();
    }

    // validation is (optionally) done in fullRestart/rollingRestart
    newSettings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s");
    if (clearDataIfNeeded) {
        clearDataIfNeeded(callback);
    }
@@ -1018,10 +1014,6 @@ public final class InternalTestCluster extends TestCluster {
            final Settings.Builder settings = Settings.builder();
            settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
            settings.put(Node.NODE_DATA_SETTING.getKey(), false);
            if (autoManageMinMasterNodes) {
                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
            }
            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
            toStartAndPublish.add(nodeAndClient);
        }

@@ -1032,9 +1024,6 @@ public final class InternalTestCluster extends TestCluster {
                settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build();
                settings.put(Node.NODE_DATA_SETTING.getKey(), true).build();
            }
            if (autoManageMinMasterNodes) {
                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
            }
            NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
            toStartAndPublish.add(nodeAndClient);
        }
@@ -1347,10 +1336,18 @@ public final class InternalTestCluster extends TestCluster {
            // special case for 1 node master - we can't update the min master nodes before we add more nodes.
            updateMinMasterNodes(currentMasters + newMasters);
        }
        for (NodeAndClient nodeAndClient : nodeAndClients) {
            nodeAndClient.startNode();
            publishNode(nodeAndClient);
        List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList());
        try {
            for (Future<?> future : futures) {
                future.get();
            }
        } catch (InterruptedException e) {
            throw new AssertionError("interrupted while starting nodes", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("failed to start nodes", e);
        }
        nodeAndClients.forEach(this::publishNode);

        if (autoManageMinMasterNodes && currentMasters == 1 && newMasters > 0) {
            // update once masters have joined
            validateClusterFormed();
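The hunk above replaces the per-node start-and-publish loop with a start-all-then-publish-all sequence. Condensed, with the rationale as comments (the comments are editorial, not from the change):

    List<Future<?>> futures = nodeAndClients.stream()
            .map(node -> executor.submit(node::startNode)) // nodes boot concurrently on the shared executor
            .collect(Collectors.toList());
    for (Future<?> future : futures) {
        future.get(); // surface any startup failure before publishing anything
    }
    nodeAndClients.forEach(this::publishNode); // the nodes map never sees a half-started node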
@@ -1535,13 +1532,7 @@ public final class InternalTestCluster extends TestCluster {
            nodeAndClient.recreateNodeOnRestart(callback, false, autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1);
        }

        for (NodeAndClient nodeAndClient : startUpOrder) {
            logger.info("starting node [{}] ", nodeAndClient.name);
            nodeAndClient.startNode();
            if (activeDisruptionScheme != null) {
                activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
            }
        }
        startAndPublishNodesAndClients(startUpOrder);

        if (callback.validateClusterForming()) {
            validateClusterFormed();
@@ -1635,6 +1626,61 @@ public final class InternalTestCluster extends TestCluster {
        return buildNode.name;
    }

    /**
     * Starts multiple nodes with default settings and returns their names
     */
    public synchronized List<String> startNodes(int numOfNodes) {
        return startNodes(numOfNodes, Settings.EMPTY);
    }

    /**
     * Starts multiple nodes with the given settings and returns their names
     */
    public synchronized List<String> startNodes(int numOfNodes, Settings settings) {
        return startNodes(Collections.nCopies(numOfNodes, settings).stream().toArray(Settings[]::new));
    }

    /**
     * Starts multiple nodes with the given settings and returns their names
     */
    public synchronized List<String> startNodes(Settings... settings) {
        final int defaultMinMasterNodes;
        if (autoManageMinMasterNodes) {
            int mastersDelta = (int) Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count();
            defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + mastersDelta);
        } else {
            defaultMinMasterNodes = -1;
        }
        List<NodeAndClient> nodes = new ArrayList<>();
        for (Settings nodeSettings : settings) {
            nodes.add(buildNode(nodeSettings, defaultMinMasterNodes));
        }
        startAndPublishNodesAndClients(nodes);
        if (autoManageMinMasterNodes) {
            validateClusterFormed();
        }

        return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
    }

    public synchronized List<String> startMasterOnlyNodes(int numNodes) {
        return startMasterOnlyNodes(numNodes, Settings.EMPTY);
    }

    public synchronized List<String> startMasterOnlyNodes(int numNodes, Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
        return startNodes(numNodes, settings1);
    }

    public synchronized List<String> startDataOnlyNodes(int numNodes) {
        return startDataOnlyNodes(numNodes, Settings.EMPTY);
    }

    public synchronized List<String> startDataOnlyNodes(int numNodes, Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
        return startNodes(numNodes, settings1);
    }

    /**
     * updates the min master nodes setting in the current running cluster.
     *
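Together with the removals below, these helpers make test node startup synchronous end to end. A usage sketch from a test's point of view (cluster setup elided):

    List<String> masters = internalCluster().startMasterOnlyNodes(2); // blocks until both nodes have started
    List<String> mixed = internalCluster().startNodes(3);             // three nodes with default settings
    String dataNode = internalCluster().startDataOnlyNode();          // single data-only node, returns its name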
@@ -1667,31 +1713,8 @@ public final class InternalTestCluster extends TestCluster {
        return (int)nodes.values().stream().filter(n -> Node.NODE_MASTER_SETTING.get(n.node().settings())).count();
    }

    public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes) {
        return startMasterOnlyNodesAsync(numNodes, Settings.EMPTY);
    }

    public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes, Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
        return startNodesAsync(numNodes, settings1);
    }

    public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes) {
        return startDataOnlyNodesAsync(numNodes, Settings.EMPTY);
    }

    public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes, Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
        return startNodesAsync(numNodes, settings1);
    }

    public synchronized Async<String> startMasterOnlyNodeAsync() {
        return startMasterOnlyNodeAsync(Settings.EMPTY);
    }

    public synchronized Async<String> startMasterOnlyNodeAsync(Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
        return startNodeAsync(settings1);
    public synchronized String startMasterOnlyNode() {
        return startMasterOnlyNode(Settings.EMPTY);
    }

    public synchronized String startMasterOnlyNode(Settings settings) {
@@ -1699,109 +1722,14 @@ public final class InternalTestCluster extends TestCluster {
        return startNode(settings1);
    }

    public synchronized Async<String> startDataOnlyNodeAsync() {
        return startDataOnlyNodeAsync(Settings.EMPTY);
    public synchronized String startDataOnlyNode() {
        return startDataOnlyNode(Settings.EMPTY);
    }

    public synchronized Async<String> startDataOnlyNodeAsync(Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
        return startNodeAsync(settings1);
    }

    public synchronized String startDataOnlyNode(Settings settings) {
        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
        return startNode(settings1);
    }

    /**
     * Starts a node in an async manner with the given settings and returns a future with its name.
     */
    public synchronized Async<String> startNodeAsync() {
        return startNodeAsync(Settings.EMPTY);
    }

    /**
     * Starts a node in an async manner with the given settings and returns a future with its name.
     */
    public synchronized Async<String> startNodeAsync(final Settings settings) {
        final int defaultMinMasterNodes;
        if (autoManageMinMasterNodes) {
            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? 1 : 0;
            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
        } else {
            defaultMinMasterNodes = -1;
        }
        return startNodeAsync(settings, defaultMinMasterNodes);
    }

    private synchronized Async<String> startNodeAsync(final Settings settings, int defaultMinMasterNodes) {
        final NodeAndClient buildNode = buildNode(settings, defaultMinMasterNodes);
        final Future<String> submit = executor.submit(() -> {
            buildNode.startNode();
            publishNode(buildNode);
            return buildNode.name;
        });
        return () -> submit.get();
    }

    /**
     * Starts multiple nodes in an async manner and returns a future with their names.
     */
    public synchronized Async<List<String>> startNodesAsync(final int numNodes) {
        return startNodesAsync(numNodes, Settings.EMPTY);
    }

    /**
     * Starts multiple nodes in an async manner with the given settings and returns a future with their names.
     */
    public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings) {
        final int defaultMinMasterNodes;
        if (autoManageMinMasterNodes) {
            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? numNodes : 0;
            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
        } else {
            defaultMinMasterNodes = -1;
        }
        final List<Async<String>> asyncs = new ArrayList<>();
        for (int i = 0; i < numNodes; i++) {
            asyncs.add(startNodeAsync(settings, defaultMinMasterNodes));
        }

        return () -> {
            List<String> ids = new ArrayList<>();
            for (Async<String> async : asyncs) {
                ids.add(async.get());
            }
            return ids;
        };
    }

    /**
     * Starts multiple nodes (based on the number of settings provided) in an async manner, with explicit settings for each node.
     * The order of the node names returned matches the order of the settings provided.
     */
    public synchronized Async<List<String>> startNodesAsync(final Settings... settings) {
        final int defaultMinMasterNodes;
        if (autoManageMinMasterNodes) {
            int mastersDelta = (int) Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count();
            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
        } else {
            defaultMinMasterNodes = -1;
        }
        List<Async<String>> asyncs = new ArrayList<>();
        for (Settings setting : settings) {
            asyncs.add(startNodeAsync(setting, defaultMinMasterNodes));
        }
        return () -> {
            List<String> ids = new ArrayList<>();
            for (Async<String> async : asyncs) {
                ids.add(async.get());
            }
            return ids;
        };
    }

    private synchronized void publishNode(NodeAndClient nodeAndClient) {
        assert !nodeAndClient.node().isClosed();
        nodes.put(nodeAndClient.name, nodeAndClient);

@@ -1828,7 +1756,8 @@ public final class InternalTestCluster extends TestCluster {

    public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
        clearDisruptionScheme();
        assert activeDisruptionScheme == null :
            "there is already an active disruption [" + activeDisruptionScheme + "]. call clearDisruptionScheme first";
        scheme.applyToCluster(this);
        activeDisruptionScheme = scheme;
    }

@@ -2121,14 +2050,4 @@ public final class InternalTestCluster extends TestCluster {
        }
    }
}

    /**
     * Simple interface that allows waiting for an async operation to finish
     *
     * @param <T> the result of the async execution
     */
    public interface Async<T> {
        T get() throws ExecutionException, InterruptedException;
    }

}