Merge branch 'master' into feature/rank-eval

commit 5c6cdb90ad
--- a/FieldStats.java
+++ b/FieldStats.java
@@ -20,8 +20,10 @@
 package org.elasticsearch.action.fieldstats;
 
 import org.apache.lucene.document.InetAddressPoint;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.StringHelper;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -45,9 +47,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     private long sumTotalTermFreq;
     private boolean isSearchable;
     private boolean isAggregatable;
+    private boolean hasMinMax;
     protected T minValue;
     protected T maxValue;
 
+    /**
+     * Builds a FieldStats where min and max value are not available for the field.
+     * @param type The native type of this FieldStats
+     * @param maxDoc Max number of docs
+     * @param docCount the number of documents that have at least one term for this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
+     * or -1 if this measure isn't available for this field.
+     * @param isSearchable true if this field is searchable
+     * @param isAggregatable true if this field is aggregatable
+     */
+    FieldStats(byte type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+               boolean isSearchable, boolean isAggregatable) {
+        this.type = type;
+        this.maxDoc = maxDoc;
+        this.docCount = docCount;
+        this.sumDocFreq = sumDocFreq;
+        this.sumTotalTermFreq = sumTotalTermFreq;
+        this.isSearchable = isSearchable;
+        this.isAggregatable = isAggregatable;
+        this.hasMinMax = false;
+    }
+
+    /**
+     * Builds a FieldStats with min and max value for the field.
+     * @param type The native type of this FieldStats
+     * @param maxDoc Max number of docs
+     * @param docCount the number of documents that have at least one term for this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
+     * or -1 if this information isn't available for this field.
+     * @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
+     * or -1 if this measure isn't available for this field.
+     * @param isSearchable true if this field is searchable
+     * @param isAggregatable true if this field is aggregatable
+     * @param minValue the minimum value indexed in this field
+     * @param maxValue the maximum value indexed in this field
+     */
     FieldStats(byte type,
                long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
@@ -60,6 +103,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         this.sumTotalTermFreq = sumTotalTermFreq;
         this.isSearchable = isSearchable;
         this.isAggregatable = isAggregatable;
+        this.hasMinMax = true;
         this.minValue = minValue;
         this.maxValue = maxValue;
     }
@@ -85,6 +129,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         }
     }
+
+    /**
+     * @return true if min/max informations are available for this field
+     */
+    public boolean hasMinMax() {
+        return hasMinMax;
+    }
 
     /**
      * @return the total number of documents.
      *
@@ -216,7 +267,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         isAggregatable |= other.isAggregatable;
 
         assert type == other.getType();
-        updateMinMax((T) other.minValue, (T) other.maxValue);
+        if (hasMinMax && other.hasMinMax) {
+            updateMinMax((T) other.minValue, (T) other.maxValue);
+        } else {
+            hasMinMax = false;
+            minValue = null;
+            maxValue = null;
+        }
     }
 
     private void updateMinMax(T min, T max) {
@@ -241,7 +298,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq);
         builder.field(SEARCHABLE_FIELD, isSearchable);
         builder.field(AGGREGATABLE_FIELD, isAggregatable);
-        toInnerXContent(builder);
+        if (hasMinMax) {
+            toInnerXContent(builder);
+        }
         builder.endObject();
         return builder;
     }
@@ -262,7 +321,14 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         out.writeLong(sumTotalTermFreq);
         out.writeBoolean(isSearchable);
         out.writeBoolean(isAggregatable);
-        writeMinMax(out);
+        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+            out.writeBoolean(hasMinMax);
+            if (hasMinMax) {
+                writeMinMax(out);
+            }
+        } else {
+            writeMinMax(out);
+        }
     }
 
     protected abstract void writeMinMax(StreamOutput out) throws IOException;
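Note: the writeTo change follows the usual wire backward-compatibility pattern: nodes on or after 5.2 exchange a presence flag before the optional min/max payload, while older nodes keep receiving the previous fixed layout. The matching read side appears in the readFrom hunk further down; in isolation it looks like this:

    // Read side of the version-gated pattern (mirrors FieldStats.readFrom below).
    boolean hasMinMax = true;                                    // pre-5.2 streams always carry min/max
    if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
        hasMinMax = in.readBoolean();                            // newer streams state it explicitly
    }
    // ... min/max bytes are consumed only when hasMinMax is true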
@@ -272,6 +338,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
      * otherwise <code>false</code> is returned
      */
     public boolean match(IndexConstraint constraint) {
+        if (hasMinMax == false) {
+            return false;
+        }
         int cmp;
         T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
         if (constraint.getProperty() == IndexConstraint.Property.MIN) {
@@ -310,6 +379,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
         if (isSearchable != that.isSearchable) return false;
         if (isAggregatable != that.isAggregatable) return false;
+        if (hasMinMax != that.hasMinMax) return false;
+        if (hasMinMax == false) {
+            return true;
+        }
         if (!minValue.equals(that.minValue)) return false;
         return maxValue.equals(that.maxValue);
 
@@ -318,10 +391,16 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     @Override
     public int hashCode() {
         return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
-            minValue, maxValue);
+            hasMinMax, minValue, maxValue);
     }
 
     public static class Long extends FieldStats<java.lang.Long> {
+        public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
         public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     long minValue, long maxValue) {
@@ -357,6 +436,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     }
 
     public static class Double extends FieldStats<java.lang.Double> {
+        public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                      boolean isSearchable, boolean isAggregatable) {
+            super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
+        }
+
         public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                       boolean isSearchable, boolean isAggregatable,
                       double minValue, double maxValue) {
@@ -397,6 +481,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     public static class Date extends FieldStats<java.lang.Long> {
         private FormatDateTimeFormatter formatter;
 
+        public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
+            this.formatter = null;
+        }
+
         public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     FormatDateTimeFormatter formatter,
@@ -439,23 +529,27 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
             if (!super.equals(o)) return false;
 
             Date that = (Date) o;
-            return Objects.equals(formatter.format(), that.formatter.format());
+            return Objects.equals(formatter == null ? null : formatter.format(),
+                that.formatter == null ? null : that.formatter.format());
         }
 
         @Override
         public int hashCode() {
             int result = super.hashCode();
-            result = 31 * result + formatter.format().hashCode();
+            result = 31 * result + (formatter == null ? 0 : formatter.format().hashCode());
             return result;
         }
     }
 
     public static class Text extends FieldStats<BytesRef> {
+        public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                    boolean isSearchable, boolean isAggregatable) {
+            super((byte) 3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
         public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,
                     BytesRef minValue, BytesRef maxValue) {
@@ -501,6 +595,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     }
 
     public static class Ip extends FieldStats<InetAddress> {
+        public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
+                  boolean isSearchable, boolean isAggregatable) {
+            super((byte) 4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                isSearchable, isAggregatable);
+        }
+
         public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                   boolean isSearchable, boolean isAggregatable,
                   InetAddress minValue, InetAddress maxValue) {
@@ -550,27 +651,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         long sumTotalTermFreq = in.readLong();
         boolean isSearchable = in.readBoolean();
         boolean isAggregatable = in.readBoolean();
+        boolean hasMinMax = true;
+        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+            hasMinMax = in.readBoolean();
+        }
         switch (type) {
             case 0:
-                return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, in.readLong(), in.readLong());
+                if (hasMinMax) {
+                    return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable, in.readLong(), in.readLong());
+                } else {
+                    return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 1:
-                return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, in.readDouble(), in.readDouble());
+                if (hasMinMax) {
+                    return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable, in.readDouble(), in.readDouble());
+                } else {
+                    return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 2:
-                FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
-                return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
+                if (hasMinMax) {
+                    FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
+                    return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
+                } else {
+                    return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
             case 3:
-                return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
+                if (hasMinMax) {
+                    return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
+                } else {
+                    return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
 
             case 4:
+                if (hasMinMax == false) {
+                    return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
+                        isSearchable, isAggregatable);
+                }
                 int l1 = in.readByte();
                 byte[] b1 = new byte[l1];
                 in.readBytes(b1, 0, l1);
@@ -599,5 +723,4 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     private static final String MIN_VALUE_AS_STRING_FIELD = "min_value_as_string";
     private static final String MAX_VALUE_FIELD = "max_value";
     private static final String MAX_VALUE_AS_STRING_FIELD = "max_value_as_string";
-
 }
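Note: a FieldStats built through the new counts-only constructors is explicit about the missing bounds, and merging propagates that: if any shard-level stats object lacks min/max, the merge hunk above nulls out min/max on the result. A brief illustration with arbitrary values:

    // Illustration only: counts-only stats report hasMinMax() == false,
    // and match(...) now returns false because there is nothing to compare against.
    FieldStats.Long stats = new FieldStats.Long(100, 80, -1, -1, true, true);
    assert stats.hasMinMax() == false;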
--- a/FieldStatsResponse.java
+++ b/FieldStatsResponse.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.fieldstats;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Nullable;
@@ -91,10 +92,21 @@ public class FieldStatsResponse extends BroadcastResponse {
         out.writeVInt(indicesMergedFieldStats.size());
         for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
             out.writeString(entry1.getKey());
-            out.writeVInt(entry1.getValue().size());
+            int size = entry1.getValue().size();
+            if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+                // filter fieldstats without min/max information
+                for (FieldStats stats : entry1.getValue().values()) {
+                    if (stats.hasMinMax() == false) {
+                        size--;
+                    }
+                }
+            }
+            out.writeVInt(size);
             for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
-                out.writeString(entry2.getKey());
-                entry2.getValue().writeTo(out);
+                if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+                    out.writeString(entry2.getKey());
+                    entry2.getValue().writeTo(out);
+                }
             }
         }
         out.writeVInt(conflicts.size());
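Note: the entry count is a VInt length prefix written before the entries themselves, so when stats without min/max are filtered out for pre-5.2 nodes, the prefix has to be adjusted up front or the receiving side would read a corrupt stream. The same invariant in a standalone sketch (helper name is hypothetical; the diff equivalently decrements from the full size):

    // The prefix must match exactly the number of entries that follow it.
    void writeFiltered(StreamOutput out, Map<String, FieldStats> stats) throws IOException {
        int size = 0;
        for (FieldStats s : stats.values()) {
            if (s.hasMinMax()) {
                size++;                       // count first ...
            }
        }
        out.writeVInt(size);                  // ... then write the prefix ...
        for (Map.Entry<String, FieldStats> e : stats.entrySet()) {
            if (e.getValue().hasMinMax()) {
                out.writeString(e.getKey());  // ... then exactly `size` entries
                e.getValue().writeTo(out);
            }
        }
    }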
--- a/TransportTermVectorsAction.java
+++ b/TransportTermVectorsAction.java
@@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -55,6 +56,13 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
 
     @Override
     protected ShardIterator shards(ClusterState state, InternalRequest request) {
+        if (request.request().doc() != null && request.request().routing() == null) {
+            // artificial document without routing specified, ignore its "id" and use either random shard or according to preference
+            GroupShardsIterator groupShardsIter = clusterService.operationRouting().searchShards(state,
+                new String[] { request.concreteIndex() }, null, request.request().preference());
+            return groupShardsIter.iterator().next();
+        }
+
         return clusterService.operationRouting().getShards(state, request.concreteIndex(), request.request().id(),
             request.request().routing(), request.request().preference());
     }
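Note: the new branch covers term vector requests for "artificial" documents, i.e. content passed inline rather than fetched by id. A request that would take this path could be built roughly like the sketch below (index, type, and field names are made up; the chained calls are assumed to match the 5.x TermVectorsRequest API):

    // No id and no routing, but an inline doc: shard selection now falls back
    // to search-style routing (random shard or preference) instead of hashing an id.
    TermVectorsRequest request = new TermVectorsRequest()
        .index("my-index")
        .type("my-type")
        .doc(XContentFactory.jsonBuilder()
            .startObject()
                .field("body", "some text to analyze")
            .endObject());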
--- a/GlobalOrdinalsBuilder.java
+++ b/GlobalOrdinalsBuilder.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 
@@ -38,6 +39,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 /**
  * Utility class to build global ordinals.
@@ -48,7 +50,9 @@ public enum GlobalOrdinalsBuilder {
     /**
     * Build global ordinals for the provided {@link IndexReader}.
     */
-    public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger) throws IOException {
+    public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData,
+            IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger,
+            Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) throws IOException {
         assert indexReader.leaves().size() > 1;
         long startTimeNS = System.nanoTime();
 
@@ -71,7 +75,7 @@ public enum GlobalOrdinalsBuilder {
             );
         }
         return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
-            atomicFD, ordinalMap, memorySizeInBytes
+            atomicFD, ordinalMap, memorySizeInBytes, scriptFunction
         );
     }
 
@@ -81,7 +85,7 @@ public enum GlobalOrdinalsBuilder {
         final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
         final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
         for (int i = 0; i < indexReader.leaves().size(); ++i) {
-            atomicFD[i] = new AbstractAtomicOrdinalsFieldData() {
+            atomicFD[i] = new AbstractAtomicOrdinalsFieldData(AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION) {
                 @Override
                 public RandomAccessOrds getOrdinalsValues() {
                     return DocValues.emptySortedSet();
@@ -105,7 +109,7 @@ public enum GlobalOrdinalsBuilder {
         }
         final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT);
         return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
-            atomicFD, ordinalMap, 0
+            atomicFD, ordinalMap, 0, AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION
         );
     }
--- a/InternalGlobalOrdinalsIndexFieldData.java
+++ b/InternalGlobalOrdinalsIndexFieldData.java
@@ -24,9 +24,11 @@ import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.util.Accountable;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
 
 import java.util.Collection;
+import java.util.function.Function;
 
 /**
  * {@link org.elasticsearch.index.fielddata.IndexFieldData} impl based on global ordinals.
@@ -34,13 +36,16 @@ import java.util.Collection;
 final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFieldData {
 
     private final Atomic[] atomicReaders;
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
 
-    InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) {
+    InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd,
+            OrdinalMap ordinalMap, long memorySizeInBytes, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
         super(indexSettings, fieldName, memorySizeInBytes);
         this.atomicReaders = new Atomic[segmentAfd.length];
         for (int i = 0; i < segmentAfd.length; i++) {
             atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i);
         }
+        this.scriptFunction = scriptFunction;
     }
 
     @Override
@@ -55,6 +60,7 @@ final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFiel
         private final int segmentIndex;
 
         private Atomic(AtomicOrdinalsFieldData afd, OrdinalMap ordinalMap, int segmentIndex) {
+            super(scriptFunction);
             this.afd = afd;
             this.ordinalMap = ordinalMap;
             this.segmentIndex = segmentIndex;
--- a/AbstractAtomicOrdinalsFieldData.java
+++ b/AbstractAtomicOrdinalsFieldData.java
@@ -29,13 +29,24 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 
 import java.util.Collection;
 import java.util.Collections;
+import java.util.function.Function;
 
 
 public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsFieldData {
 
+    public static final Function<RandomAccessOrds, ScriptDocValues<?>> DEFAULT_SCRIPT_FUNCTION =
+        ((Function<RandomAccessOrds, SortedBinaryDocValues>) FieldData::toString)
+            .andThen(ScriptDocValues.Strings::new);
+
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
+
+    protected AbstractAtomicOrdinalsFieldData(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
+        this.scriptFunction = scriptFunction;
+    }
+
     @Override
-    public final ScriptDocValues getScriptValues() {
-        return new ScriptDocValues.Strings(getBytesValues());
+    public final ScriptDocValues<?> getScriptValues() {
+        return scriptFunction.apply(getOrdinalsValues());
     }
 
     @Override
@@ -44,7 +55,7 @@ public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsF
     }
 
     public static AtomicOrdinalsFieldData empty() {
-        return new AbstractAtomicOrdinalsFieldData() {
+        return new AbstractAtomicOrdinalsFieldData(DEFAULT_SCRIPT_FUNCTION) {
 
             @Override
             public long ramBytesUsed() {
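Note: DEFAULT_SCRIPT_FUNCTION is built by composing two existing conversions with Function.andThen: ordinals are first flattened to binary values (FieldData::toString) and the result is wrapped as string script values (ScriptDocValues.Strings::new). The composition idiom itself, in a self-contained JDK-only demo:

    import java.util.function.Function;

    // f.andThen(g) builds "g after f" without declaring an intermediate class.
    public class ComposeDemo {
        public static void main(String[] args) {
            Function<String, Integer> parse = Integer::parseInt;    // stands in for FieldData::toString
            Function<Integer, String> render = i -> "value=" + i;   // stands in for ScriptDocValues.Strings::new
            System.out.println(parse.andThen(render).apply("42"));  // prints "value=42"
        }
    }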
--- a/AbstractIndexOrdinalsFieldData.java
+++ b/AbstractIndexOrdinalsFieldData.java
@@ -97,7 +97,8 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
 
     @Override
     public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
-        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);
+        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger,
+            AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);
     }
 
     @Override
--- a/DocValuesIndexFieldData.java
+++ b/DocValuesIndexFieldData.java
@@ -21,12 +21,14 @@ package org.elasticsearch.index.fielddata.plain;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
@@ -34,6 +36,7 @@ import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 
 import java.util.Set;
+import java.util.function.Function;
 
 import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.common.util.set.Sets.newHashSet;
@@ -72,12 +75,18 @@ public abstract class DocValuesIndexFieldData {
         private static final Set<String> BINARY_INDEX_FIELD_NAMES = unmodifiableSet(newHashSet(UidFieldMapper.NAME, IdFieldMapper.NAME));
 
         private NumericType numericType;
+        private Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION;
 
         public Builder numericType(NumericType type) {
            this.numericType = type;
            return this;
         }
 
+        public Builder scriptFunction(Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
+            this.scriptFunction = scriptFunction;
+            return this;
+        }
+
         @Override
         public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
                                        CircuitBreakerService breakerService, MapperService mapperService) {
@@ -89,7 +98,7 @@ public abstract class DocValuesIndexFieldData {
             } else if (numericType != null) {
                 return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType);
             } else {
-                return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService);
+                return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService, scriptFunction);
             }
         }
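Note: the Builder keeps its old default, so every existing caller is unaffected; only mappers that opt in get a custom script representation. Usage in isolation (IpScriptDocValues is introduced in the IpFieldMapper hunk below):

    // Default behavior unchanged; a custom function is opt-in per field type.
    IndexFieldData.Builder plain = new DocValuesIndexFieldData.Builder();
    IndexFieldData.Builder ip = new DocValuesIndexFieldData.Builder()
        .scriptFunction(IpScriptDocValues::new);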
--- a/IndexIndexFieldData.java
+++ b/IndexIndexFieldData.java
@@ -56,6 +56,7 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
         private final String index;
 
         IndexAtomicFieldData(String index) {
+            super(DEFAULT_SCRIPT_FUNCTION);
             this.index = index;
         }
 
--- a/PagedBytesAtomicFieldData.java
+++ b/PagedBytesAtomicFieldData.java
@@ -38,6 +38,7 @@ public class PagedBytesAtomicFieldData extends AbstractAtomicOrdinalsFieldData {
     protected final Ordinals ordinals;
 
     public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, PackedLongValues termOrdToBytesOffset, Ordinals ordinals) {
+        super(DEFAULT_SCRIPT_FUNCTION);
         this.bytes = bytes;
         this.termOrdToBytesOffset = termOrdToBytesOffset;
         this.ordinals = ordinals;
--- a/SortedSetDVBytesAtomicFieldData.java
+++ b/SortedSetDVBytesAtomicFieldData.java
@@ -25,10 +25,12 @@ import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.util.Accountable;
 import org.elasticsearch.index.fielddata.AtomicFieldData;
 import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.function.Function;
 
 /**
  * An {@link AtomicFieldData} implementation that uses Lucene {@link org.apache.lucene.index.SortedSetDocValues}.
@@ -38,7 +40,9 @@ public final class SortedSetDVBytesAtomicFieldData extends AbstractAtomicOrdinal
     private final LeafReader reader;
     private final String field;
 
-    SortedSetDVBytesAtomicFieldData(LeafReader reader, String field) {
+    SortedSetDVBytesAtomicFieldData(LeafReader reader, String field, Function<RandomAccessOrds,
+            ScriptDocValues<?>> scriptFunction) {
+        super(scriptFunction);
         this.reader = reader;
         this.field = field;
     }
--- a/SortedSetDVOrdinalsIndexFieldData.java
+++ b/SortedSetDVOrdinalsIndexFieldData.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index.fielddata.plain;
 
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
@@ -28,24 +29,29 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
 import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.search.MultiValueMode;
 
 import java.io.IOException;
+import java.util.function.Function;
 
 public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData implements IndexOrdinalsFieldData {
 
     private final IndexSettings indexSettings;
     private final IndexFieldDataCache cache;
     private final CircuitBreakerService breakerService;
+    private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
 
-    public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName, CircuitBreakerService breakerService) {
+    public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexFieldDataCache cache, String fieldName,
+            CircuitBreakerService breakerService, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
         super(indexSettings.getIndex(), fieldName);
         this.indexSettings = indexSettings;
         this.cache = cache;
         this.breakerService = breakerService;
+        this.scriptFunction = scriptFunction;
     }
 
     @Override
@@ -55,7 +61,7 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i
 
     @Override
     public AtomicOrdinalsFieldData load(LeafReaderContext context) {
-        return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName);
+        return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName, scriptFunction);
     }
 
     @Override
@@ -100,6 +106,6 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i
 
     @Override
     public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception {
-        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger);
+        return GlobalOrdinalsBuilder.build(indexReader, this, indexSettings, breakerService, logger, scriptFunction);
     }
 }
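Note: with this file the plumbing is complete: the function travels Builder -> SortedSetDVOrdinalsIndexFieldData -> per-segment SortedSetDVBytesAtomicFieldData, and the same instance is handed to the global-ordinals path, so scripts see one consistent representation. Sketched from the consumer side (setup elided, names from the hunks above):

    AtomicOrdinalsFieldData leaf = fieldData.load(leafContext);
    ScriptDocValues<?> scriptValues = leaf.getScriptValues();  // scriptFunction.apply(leaf.getOrdinalsValues())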
--- a/DateFieldMapper.java
+++ b/DateFieldMapper.java
@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
@@ -299,9 +300,13 @@ public class DateFieldMapper extends FieldMapper {
         @Override
         public FieldStats.Date stats(IndexReader reader) throws IOException {
             String field = name();
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, field);
             if (size == 0) {
-                return null;
+                return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
             }
             int docCount = PointValues.getDocCount(reader, field);
             byte[] min = PointValues.getMinPackedValue(reader, field);
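Note: together with the FieldStats changes, stats() now distinguishes three cases instead of two: an unmapped field still yields null, but a mapped field with no indexed values yields counts-only stats instead of disappearing from the response. Condensed into a sketch (hypothetical helper mirroring the hunk above; fullStats stands in for the elided branch):

    FieldStats.Date dateStatsOrNull(IndexReader reader, String field,
                                    boolean searchable, boolean aggregatable) throws IOException {
        if (MultiFields.getMergedFieldInfos(reader).fieldInfo(field) == null) {
            return null;                            // 1) field unknown to the index: no entry
        }
        if (PointValues.size(reader, field) == 0) { // 2) field mapped but empty:
            return new FieldStats.Date(reader.maxDoc(), 0, -1, -1, searchable, aggregatable);
        }
        return fullStats(reader, field);            // 3) values present: min/max included, as above
    }

The same three-way contract is applied to ip, text, and all numeric types in the hunks below.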
--- a/IpFieldMapper.java
+++ b/IpFieldMapper.java
@@ -23,10 +23,12 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.RandomAccessOrds;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
@@ -38,6 +40,8 @@ import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.search.DocValueFormat;
@@ -45,8 +49,13 @@ import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.AbstractList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.ListIterator;
 import java.util.Map;
 
 /** A {@link FieldMapper} for ip addresses. */
@@ -213,9 +222,13 @@ public class IpFieldMapper extends FieldMapper {
         @Override
         public FieldStats.Ip stats(IndexReader reader) throws IOException {
             String field = name();
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, field);
             if (size == 0) {
-                return null;
+                return new FieldStats.Ip(reader.maxDoc(), 0, -1, -1, isSearchable(), isAggregatable());
             }
             int docCount = PointValues.getDocCount(reader, field);
             byte[] min = PointValues.getMinPackedValue(reader, field);
@@ -225,10 +238,50 @@ public class IpFieldMapper extends FieldMapper {
                 InetAddressPoint.decode(min), InetAddressPoint.decode(max));
         }
 
+        private static class IpScriptDocValues extends AbstractList<String> implements ScriptDocValues<String> {
+
+            private final RandomAccessOrds values;
+
+            IpScriptDocValues(RandomAccessOrds values) {
+                this.values = values;
+            }
+
+            @Override
+            public void setNextDocId(int docId) {
+                values.setDocument(docId);
+            }
+
+            public String getValue() {
+                if (isEmpty()) {
+                    return null;
+                } else {
+                    return get(0);
+                }
+            }
+
+            @Override
+            public List<String> getValues() {
+                return Collections.unmodifiableList(this);
+            }
+
+            @Override
+            public String get(int index) {
+                BytesRef encoded = values.lookupOrd(values.ordAt(0));
+                InetAddress address = InetAddressPoint.decode(
+                    Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length));
+                return InetAddresses.toAddrString(address);
+            }
+
+            @Override
+            public int size() {
+                return values.cardinality();
+            }
+        }
+
         @Override
         public IndexFieldData.Builder fielddataBuilder() {
             failIfNoDocValues();
-            return new DocValuesIndexFieldData.Builder();
+            return new DocValuesIndexFieldData.Builder().scriptFunction(IpScriptDocValues::new);
         }
 
         @Override
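Note: IpScriptDocValues is what makes doc values of ip fields readable from scripts as formatted address strings rather than the raw encoded bytes. One thing worth flagging for review: get(int index) always looks up ordAt(0), so every position reports the first value and multi-valued fields repeat the first address. Usage in isolation (segment setup elided):

    IpScriptDocValues docValues = new IpScriptDocValues(ords); // ords: the segment's RandomAccessOrds
    docValues.setNextDocId(3);
    String first = docValues.getValue();        // e.g. "192.168.0.1", or null if the doc has no value
    List<String> all = docValues.getValues();   // size() == cardinality, but entries repeat get(0)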
--- a/MappedFieldType.java
+++ b/MappedFieldType.java
@@ -20,18 +20,19 @@
 package org.elasticsearch.index.mapper;
 
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.joda.DateMathParser;
@@ -375,14 +376,16 @@ public abstract class MappedFieldType extends FieldType {
      */
     public FieldStats stats(IndexReader reader) throws IOException {
         int maxDoc = reader.maxDoc();
-        Terms terms = MultiFields.getTerms(reader, name());
-        if (terms == null) {
+        FieldInfo fi = MultiFields.getMergedFieldInfos(reader).fieldInfo(name());
+        if (fi == null) {
             return null;
         }
+        Terms terms = MultiFields.getTerms(reader, name());
+        if (terms == null) {
+            return new FieldStats.Text(maxDoc, 0, -1, -1, isSearchable(), isAggregatable());
+        }
         FieldStats stats = new FieldStats.Text(maxDoc, terms.getDocCount(),
-            terms.getSumDocFreq(), terms.getSumTotalTermFreq(),
-            isSearchable(), isAggregatable(),
-            terms.getMin(), terms.getMax());
+            terms.getSumDocFreq(), terms.getSumTotalTermFreq(), isSearchable(), isAggregatable(), terms.getMin(), terms.getMax());
         return stats;
     }
@@ -27,6 +27,7 @@ import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;

@@ -227,14 +228,18 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);
             byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
-            return new FieldStats.Double(reader.maxDoc(),docCount, -1L, size,
+            return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 HalfFloatPoint.decodeDimension(min, 0), HalfFloatPoint.decodeDimension(max, 0));
         }

@@ -311,9 +316,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -395,9 +404,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Double(reader.maxDoc(),0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -613,9 +626,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Long(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

@@ -709,9 +726,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
+            FieldInfo fi = org.apache.lucene.index.MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
+            if (fi == null) {
+                return null;
+            }
             long size = PointValues.size(reader, fieldName);
             if (size == 0) {
-                return null;
+                return new FieldStats.Long(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
             }
             int docCount = PointValues.getDocCount(reader, fieldName);
             byte[] min = PointValues.getMinPackedValue(reader, fieldName);

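Note: every numeric type in NumberFieldMapper now repeats the same two-step guard: return null when the field has no FieldInfo at all, and return stats without min/max when the field exists but indexed no points. A condensed sketch of that shared shape; the decode parameter is a hypothetical stand-in for the per-type decodeDimension calls:

    import java.io.IOException;
    import java.util.function.Function;

    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.index.PointValues;
    import org.elasticsearch.action.fieldstats.FieldStats;

    final class PointStatsSketch {
        static FieldStats.Double stats(IndexReader reader, String fieldName,
                                       boolean isSearchable, boolean isAggregatable,
                                       Function<byte[], Double> decode) throws IOException {
            FieldInfo fi = MultiFields.getMergedFieldInfos(reader).fieldInfo(fieldName);
            if (fi == null) {
                return null;                         // field entirely absent from the reader
            }
            long size = PointValues.size(reader, fieldName);
            if (size == 0) {
                // field exists but has no points: keep the searchable/aggregatable flags
                return new FieldStats.Double(reader.maxDoc(), 0, -1, -1, isSearchable, isAggregatable);
            }
            int docCount = PointValues.getDocCount(reader, fieldName);
            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
            return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                isSearchable, isAggregatable, decode.apply(min), decode.apply(max));
        }
    }
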
@@ -65,6 +65,7 @@ import org.apache.lucene.analysis.util.ElisionFilter;
 import org.elasticsearch.Version;
 import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
 import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
+import org.elasticsearch.index.analysis.MultiTermAwareComponent;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
 import org.tartarus.snowball.ext.DutchStemmer;

@@ -112,6 +113,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new ASCIIFoldingFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     LENGTH(CachingStrategy.LUCENE) {

@@ -133,6 +138,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new LowerCaseFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     UPPERCASE(CachingStrategy.LUCENE) {

@@ -140,6 +149,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new UpperCaseFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     KSTEM(CachingStrategy.ONE) {

@@ -218,6 +231,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new ElisionFilter(tokenStream, FrenchAnalyzer.DEFAULT_ARTICLES);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     ARABIC_STEM(CachingStrategy.ONE) {

@@ -281,6 +298,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new ArabicNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     PERSIAN_NORMALIZATION(CachingStrategy.ONE) {

@@ -288,6 +309,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new PersianNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     TYPE_AS_PAYLOAD(CachingStrategy.ONE) {

@@ -309,6 +334,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new GermanNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     HINDI_NORMALIZATION(CachingStrategy.ONE) {

@@ -316,6 +345,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new HindiNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     INDIC_NORMALIZATION(CachingStrategy.ONE) {

@@ -323,6 +356,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new IndicNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     SORANI_NORMALIZATION(CachingStrategy.ONE) {

@@ -330,6 +367,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new SoraniNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     SCANDINAVIAN_NORMALIZATION(CachingStrategy.ONE) {

@@ -337,6 +378,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new ScandinavianNormalizationFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     SCANDINAVIAN_FOLDING(CachingStrategy.ONE) {

@@ -344,6 +389,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new ScandinavianFoldingFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     APOSTROPHE(CachingStrategy.ONE) {

@@ -358,6 +407,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new CJKWidthFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     DECIMAL_DIGIT(CachingStrategy.ONE) {

@@ -365,6 +418,10 @@ public enum PreBuiltTokenFilters {
         public TokenStream create(TokenStream tokenStream, Version version) {
             return new DecimalDigitFilter(tokenStream);
         }
+        @Override
+        protected boolean isMultiTermAware() {
+            return true;
+        }
     },

     CJK_BIGRAM(CachingStrategy.ONE) {

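Note: the additions above all mark character-level normalizers (case folding, ASCII folding, script normalization, width folding) as multi-term aware, meaning they are safe to apply while rewriting wildcard, prefix, and fuzzy queries because they never split or drop tokens; filters that do split tokens keep the false default introduced in the next hunk. A sketch of how a consumer could detect the multi-term-safe variant; the helper name is illustrative:

    import org.elasticsearch.index.analysis.MultiTermAwareComponent;
    import org.elasticsearch.index.analysis.TokenFilterFactory;

    final class MultiTermSelectionSketch {
        // Returns the factory to use during multi-term query rewriting, or null
        // if the filter is not safe there. Factories built from filters with
        // isMultiTermAware() == true also implement MultiTermAwareComponent.
        static TokenFilterFactory forMultiTermQuery(TokenFilterFactory factory) {
            if (factory instanceof MultiTermAwareComponent) {
                return (TokenFilterFactory) ((MultiTermAwareComponent) factory).getMultiTermComponent();
            }
            return null;
        }
    }
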
@@ -390,7 +447,11 @@ public enum PreBuiltTokenFilters {

     ;

-    public abstract TokenStream create(TokenStream tokenStream, Version version);
+    protected boolean isMultiTermAware() {
+        return false;
+    }
+
+    public abstract TokenStream create(TokenStream tokenStream, Version version);

     protected final PreBuiltCacheFactory.PreBuiltCache<TokenFilterFactory> cache;

@@ -399,21 +460,42 @@ public enum PreBuiltTokenFilters {
         cache = PreBuiltCacheFactory.getCache(cachingStrategy);
     }

+    private interface MultiTermAwareTokenFilterFactory extends TokenFilterFactory, MultiTermAwareComponent {}
+
     public synchronized TokenFilterFactory getTokenFilterFactory(final Version version) {
         TokenFilterFactory factory = cache.get(version);
         if (factory == null) {
-            final String finalName = name();
-            factory = new TokenFilterFactory() {
-                @Override
-                public String name() {
-                    return finalName.toLowerCase(Locale.ROOT);
-                }
-
-                @Override
-                public TokenStream create(TokenStream tokenStream) {
-                    return valueOf(finalName).create(tokenStream, version);
-                }
-            };
+            final String finalName = name().toLowerCase(Locale.ROOT);
+            if (isMultiTermAware()) {
+                factory = new MultiTermAwareTokenFilterFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public TokenStream create(TokenStream tokenStream) {
+                        return PreBuiltTokenFilters.this.create(tokenStream, version);
+                    }
+
+                    @Override
+                    public Object getMultiTermComponent() {
+                        return this;
+                    }
+                };
+            } else {
+                factory = new TokenFilterFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public TokenStream create(TokenStream tokenStream) {
+                        return PreBuiltTokenFilters.this.create(tokenStream, version);
+                    }
+                };
+            }
             cache.put(version, factory);
         }

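Note: a short usage sketch of the rewritten factory method; the choice of LOWERCASE and Version.CURRENT is arbitrary:

    import org.apache.lucene.analysis.TokenStream;
    import org.elasticsearch.Version;
    import org.elasticsearch.index.analysis.TokenFilterFactory;
    import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;

    final class PreBuiltFactoryUsage {
        static TokenStream lowercase(TokenStream in) {
            // Factories are cached per version; name() now returns the pre-lowercased
            // enum name instead of lowercasing it on every call.
            TokenFilterFactory factory = PreBuiltTokenFilters.LOWERCASE.getTokenFilterFactory(Version.CURRENT);
            return factory.create(in);
        }
    }
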
@@ -33,6 +33,8 @@ import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
 import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.analysis.MultiTermAwareComponent;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;

@@ -87,6 +89,10 @@ public enum PreBuiltTokenizers {
         protected Tokenizer create(Version version) {
             return new LowerCaseTokenizer();
         }
+        @Override
+        protected TokenFilterFactory getMultiTermComponent(Version version) {
+            return PreBuiltTokenFilters.LOWERCASE.getTokenFilterFactory(version);
+        }
     },

     WHITESPACE(CachingStrategy.LUCENE) {

@@ -128,6 +134,10 @@ public enum PreBuiltTokenizers {

     protected abstract Tokenizer create(Version version);

+    protected TokenFilterFactory getMultiTermComponent(Version version) {
+        return null;
+    }
+
     protected final PreBuiltCacheFactory.PreBuiltCache<TokenizerFactory> cache;

@@ -135,22 +145,42 @@ public enum PreBuiltTokenizers {
         cache = PreBuiltCacheFactory.getCache(cachingStrategy);
     }

+    private interface MultiTermAwareTokenizerFactory extends TokenizerFactory, MultiTermAwareComponent {}
+
     public synchronized TokenizerFactory getTokenizerFactory(final Version version) {
         TokenizerFactory tokenizerFactory = cache.get(version);
         if (tokenizerFactory == null) {
-            final String finalName = name();
-            tokenizerFactory = new TokenizerFactory() {
-                @Override
-                public String name() {
-                    return finalName.toLowerCase(Locale.ROOT);
-                }
-
-                @Override
-                public Tokenizer create() {
-                    return valueOf(finalName).create(version);
-                }
-            };
+            final String finalName = name().toLowerCase(Locale.ROOT);
+            if (getMultiTermComponent(version) != null) {
+                tokenizerFactory = new MultiTermAwareTokenizerFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public Tokenizer create() {
+                        return PreBuiltTokenizers.this.create(version);
+                    }
+
+                    @Override
+                    public Object getMultiTermComponent() {
+                        return PreBuiltTokenizers.this.getMultiTermComponent(version);
+                    }
+                };
+            } else {
+                tokenizerFactory = new TokenizerFactory() {
+                    @Override
+                    public String name() {
+                        return finalName;
+                    }
+
+                    @Override
+                    public Tokenizer create() {
+                        return PreBuiltTokenizers.this.create(version);
+                    }
+                };
+            }
             cache.put(version, tokenizerFactory);
         }

@@ -45,7 +45,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
     @TestLogging("_root:DEBUG")
     public void testDelayShards() throws Exception {
         logger.info("--> starting 3 nodes");
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);

         // Wait for all 3 nodes to be up
         logger.info("--> waiting for 3 nodes to be up");

@@ -162,7 +162,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
     }

     public void testValuesSmokeScreen() throws IOException, ExecutionException, InterruptedException {
-        internalCluster().startNodesAsync(randomIntBetween(1, 3)).get();
+        internalCluster().startNodes(randomIntBetween(1, 3));
         index("test1", "type", "1", "f", "f");

         ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();

@@ -202,7 +202,7 @@ public class ClusterStatsIT extends ESIntegTestCase {

     public void testAllocatedProcessors() throws Exception {
         // start one node with 7 processors.
-        internalCluster().startNodesAsync(Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 7).build()).get();
+        internalCluster().startNode(Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 7).build());
         waitForNodes(1);

         ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();

@@ -75,7 +75,7 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase {
                 .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
                 .build();

-        internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get();
+        internalCluster().startMasterOnlyNodes(3, sharedSettings);

         String dataNode = internalCluster().startDataOnlyNode(sharedSettings);

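Note: the test hunks from here on are one mechanical migration: the Async node-start helpers on InternalTestCluster, which returned a future resolved with get(), are replaced by synchronous variants that block internally and hand back the node names directly. Schematically (the old call is shown as a comment, since that API no longer exists after this commit):

    import java.util.List;

    import org.elasticsearch.test.ESIntegTestCase;

    public class StartNodesMigrationSketch extends ESIntegTestCase {
        public void testMigrationShape() {
            // before: List<String> nodes = internalCluster().startNodesAsync(3).get();
            List<String> nodes = internalCluster().startNodes(3);   // after
            assertEquals(3, nodes.size());
        }
    }
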
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.termvectors;

 import com.carrotsearch.hppc.ObjectIntHashMap;
+
 import org.apache.lucene.analysis.payloads.PayloadHelper;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DirectoryReader;

@@ -30,6 +31,7 @@ import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.common.Strings;

@@ -42,6 +44,7 @@ import org.elasticsearch.index.mapper.FieldMapper;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;

@@ -49,6 +52,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;

 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@@ -1170,6 +1174,48 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
         }
     }

+    public void testArtificialDocWithPreference() throws ExecutionException, InterruptedException, IOException {
+        // setup indices
+        Settings.Builder settings = Settings.builder()
+                .put(indexSettings())
+                .put("index.analysis.analyzer", "standard");
+        assertAcked(prepareCreate("test")
+                .setSettings(settings)
+                .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets"));
+        ensureGreen();
+
+        // index document
+        indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "random permutation"));
+
+        // Get search shards
+        ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards("test").get();
+        List<Integer> shardIds = Arrays.stream(searchShardsResponse.getGroups()).map(s -> s.getShardId().id()).collect(Collectors.toList());
+
+        // request termvectors of artificial document from each shard
+        int sumTotalTermFreq = 0;
+        int sumDocFreq = 0;
+        for (Integer shardId : shardIds) {
+            TermVectorsResponse tvResponse = client().prepareTermVectors()
+                    .setIndex("test")
+                    .setType("type1")
+                    .setPreference("_shards:" + shardId)
+                    .setDoc(jsonBuilder().startObject().field("field1", "random permutation").endObject())
+                    .setFieldStatistics(true)
+                    .setTermStatistics(true)
+                    .get();
+            Fields fields = tvResponse.getFields();
+            Terms terms = fields.terms("field1");
+            assertNotNull(terms);
+            TermsEnum termsEnum = terms.iterator();
+            while (termsEnum.next() != null) {
+                sumTotalTermFreq += termsEnum.totalTermFreq();
+                sumDocFreq += termsEnum.docFreq();
+            }
+        }
+        assertEquals("expected to find term statistics in exactly one shard!", 2, sumTotalTermFreq);
+        assertEquals("expected to find term statistics in exactly one shard!", 2, sumDocFreq);
+    }
+
     private void checkBestTerms(Terms terms, List<String> expectedTerms) throws IOException {
         final TermsEnum termsEnum = terms.iterator();
         List<String> bestTerms = new ArrayList<>();

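Note: the new test pins each artificial-document term-vector request to one shard via the _shards:<id> preference, so only the shard that actually holds document 1 contributes non-zero statistics (the two indexed terms). The same preference works for ordinary searches; a minimal sketch with placeholder index name and shard number:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;

    final class ShardPreferenceSketch {
        // Restrict a search to shard 0 of "test" using the _shards preference.
        static SearchResponse searchShardZero(Client client) {
            return client.prepareSearch("test")
                    .setPreference("_shards:0")
                    .get();
        }
    }
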
@@ -61,7 +61,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.OldIndexUtils;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

@@ -129,24 +128,23 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
     }

     void setupCluster() throws Exception {
-        InternalTestCluster.Async<List<String>> replicas = internalCluster().startNodesAsync(1); // for replicas
+        List<String> replicas = internalCluster().startNodes(1); // for replicas

         Path baseTempDir = createTempDir();
         // start single data path node
         Settings.Builder nodeSettings = Settings.builder()
             .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath())
             .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
-        InternalTestCluster.Async<String> singleDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+        singleDataPathNodeName = internalCluster().startNode(nodeSettings);

         // start multi data path node
         nodeSettings = Settings.builder()
             .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir
                 .resolve("multi-path2").toAbsolutePath())
             .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master
-        InternalTestCluster.Async<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+        multiDataPathNodeName = internalCluster().startNode(nodeSettings);

         // find single data path dir
-        singleDataPathNodeName = singleDataPathNode.get();
         Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths();
         assertEquals(1, nodePaths.length);
         singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);

@@ -155,7 +153,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         logger.info("--> Single data path: {}", singleDataPath);

         // find multi data path dirs
-        multiDataPathNodeName = multiDataPathNode.get();
         nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
         assertEquals(2, nodePaths.length);
         multiDataPath = new Path[]{nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),

@@ -165,8 +162,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         Files.createDirectories(multiDataPath[0]);
         Files.createDirectories(multiDataPath[1]);
         logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);
-
-        replicas.get(); // wait for replicas
     }

     void upgradeIndexFolder() throws Exception {

@@ -126,7 +126,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
     }

     public void testClusterInfoServiceCollectsInformation() throws Exception {
-        internalCluster().startNodesAsync(2).get();
+        internalCluster().startNodes(2);
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
             .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE).build()));

@@ -174,10 +174,9 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
     }

     public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException {
-        internalCluster().startNodesAsync(2,
+        internalCluster().startNodes(2,
             // manually control publishing
-            Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build())
-            .get();
+            Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build());
         prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
         ensureGreen("test");
         InternalTestCluster internalTestCluster = internalCluster();

@@ -24,7 +24,6 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;

@@ -202,22 +201,19 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             .build();

         logger.info("--> start first 2 nodes");
-        internalCluster().startNodesAsync(2, settings).get();
+        internalCluster().startNodes(2, settings);

         ClusterState state;

-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
-                for (Client client : clients()) {
-                    ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
-                    assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
-                }
+        assertBusy(() -> {
+            for (Client client : clients()) {
+                ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+                assertThat(state1.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
             }
         });

         logger.info("--> start two more nodes");
-        internalCluster().startNodesAsync(2, settings).get();
+        internalCluster().startNodes(2, settings);

         ensureGreen();
         ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();

@@ -252,7 +248,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         assertNoMasterBlockOnAllNodes();

         logger.info("--> start back the 2 nodes ");
-        String[] newNodes = internalCluster().startNodesAsync(2, settings).get().toArray(Strings.EMPTY_ARRAY);
+        String[] newNodes = internalCluster().startNodes(2, settings).stream().toArray(String[]::new);

         ensureGreen();
         clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();

@@ -338,7 +334,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {

         logger.info("--> starting [{}] nodes. min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes);
-        internalCluster().startNodesAsync(nodeCount, settings.build()).get();
+        internalCluster().startNodes(nodeCount, settings.build());

         logger.info("--> waiting for nodes to join");
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());

@@ -371,7 +367,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
             .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
             .build();
-        internalCluster().startNodesAsync(3, settings).get();
+        internalCluster().startNodes(3, settings);
         ensureGreen(); // ensure cluster state is recovered before we disrupt things

         final String master = internalCluster().getMasterName();

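Note: the assertBusy rewrite above is the same cleanup in miniature: the anonymous Runnable collapses into a lambda, and the inner ClusterState local is renamed to state1 so it no longer shadows the state variable declared in the enclosing method. A self-contained sketch of the shape of that transformation; the retry helper here is a simplification of the real one:

    import static org.junit.Assert.assertTrue;

    final class AssertBusyLambdaSketch {
        interface CheckedRunnable<E extends Exception> {
            void run() throws E;
        }

        // Simplified stand-in: the real assertBusy retries with backoff until
        // the assertions stop throwing.
        static void assertBusy(CheckedRunnable<Exception> task) throws Exception {
            task.run();
        }

        static void demo() throws Exception {
            // before: assertBusy(new Runnable() { @Override public void run() { ... } });
            assertBusy(() -> assertTrue(conditionHolds()));   // after
        }

        static boolean conditionHolds() {
            return true; // placeholder for the real cluster-state check
        }
    }
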
@@ -27,17 +27,16 @@ import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;

 import static org.hamcrest.Matchers.equalTo;

 @ClusterScope(scope= Scope.TEST, numDataNodes =0)
 public class UpdateSettingsValidationIT extends ESIntegTestCase {
     public void testUpdateSettingsValidation() throws Exception {
-        internalCluster().startNodesAsync(
+        internalCluster().startNodes(
             Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(),
             Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(),
             Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()
-        ).get();
+        );

         createIndex("test");
         NumShards test = getNumShards("test");

@@ -57,7 +57,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {

         logger.info("--> starting 2 nodes on the same rack");
-        internalCluster().startNodesAsync(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build()).get();
+        internalCluster().startNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build());

         createIndex("test1");
         createIndex("test2");

@@ -107,12 +107,12 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
             .build();

         logger.info("--> starting 4 nodes on different zones");
-        List<String> nodes = internalCluster().startNodesAsync(
+        List<String> nodes = internalCluster().startNodes(
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build()
-        ).get();
+        );
         String A_0 = nodes.get(0);
         String B_0 = nodes.get(1);
         String B_1 = nodes.get(2);

@@ -153,10 +153,10 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
             .build();

         logger.info("--> starting 2 nodes on zones 'a' & 'b'");
-        List<String> nodes = internalCluster().startNodesAsync(
+        List<String> nodes = internalCluster().startNodes(
             Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(),
             Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()
-        ).get();
+        );
         String A_0 = nodes.get(0);
         String B_0 = nodes.get(1);
         client().admin().indices().prepareCreate("test")

@@ -85,7 +85,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
     }

     private void rerouteWithCommands(Settings commonSettings) throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2, commonSettings).get();
+        List<String> nodesIds = internalCluster().startNodes(2, commonSettings);
         final String node_1 = nodesIds.get(0);
         final String node_2 = nodesIds.get(1);

@@ -304,7 +304,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
     }

     public void testClusterRerouteWithBlocks() throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);

         logger.info("--> create an index with 1 shard and 0 replicas");
         assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)));

@@ -43,7 +43,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

     public void testDecommissionNodeNoReplicas() throws Exception {
         logger.info("--> starting 2 nodes");
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_0 = nodesIds.get(0);
         final String node_1 = nodesIds.get(1);
         assertThat(cluster().size(), equalTo(2));

@@ -82,7 +82,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

     public void testDisablingAllocationFiltering() throws Exception {
         logger.info("--> starting 2 nodes");
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_0 = nodesIds.get(0);
         final String node_1 = nodesIds.get(1);
         assertThat(cluster().size(), equalTo(2));

@@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
-import org.elasticsearch.test.junit.annotations.TestLogging;

 import java.util.Collections;
 import java.util.List;

@@ -42,7 +41,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * get allocated to a free node when the node hosting it leaves the cluster.
      */
    public void testNoDelayedTimeout() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -61,7 +60,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * on it before.
      */
     public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -85,7 +84,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * though the node hosting the shard is not coming back.
      */
     public void testDelayedAllocationTimesOut() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -107,7 +106,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * even though the node it was hosted on will not come back.
      */
     public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -133,7 +132,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
      * even though the node it was hosted on will not come back.
      */
     public void testDelayedAllocationChangeWithSettingTo0() throws Exception {
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -71,7 +71,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
     private void createStaleReplicaScenario() throws Exception {
         logger.info("--> starting 3 nodes, 1 master, 2 data");
         String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
-        internalCluster().startDataOnlyNodesAsync(2).get();
+        internalCluster().startDataOnlyNodes(2);

         assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get());

@@ -267,7 +267,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {

     public void testNotWaitForQuorumCopies() throws Exception {
         logger.info("--> starting 3 nodes");
-        internalCluster().startNodesAsync(3).get();
+        internalCluster().startNodes(3);
         logger.info("--> creating index with 1 primary and 2 replicas");
         assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get());

@@ -289,7 +289,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
      */
     public void testForceAllocatePrimaryOnNoDecision() throws Exception {
         logger.info("--> starting 1 node");
-        final String node = internalCluster().startNodeAsync().get();
+        final String node = internalCluster().startNode();
         logger.info("--> creating index with 1 primary and 0 replicas");
         final String indexName = "test-idx";
         assertAcked(client().admin().indices()

@@ -54,7 +54,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
     }

     public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
-        List<String> nodes = internalCluster().startNodesAsync(3).get();
+        List<String> nodes = internalCluster().startNodes(3);

         // Wait for all 3 nodes to be up
         assertBusy(new Runnable() {

@@ -43,8 +43,8 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.cluster.service.ClusterStateStatus;
 import org.elasticsearch.cluster.service.ClusterServiceState;
+import org.elasticsearch.cluster.service.ClusterStateStatus;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
@@ -187,13 +187,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }
 
     private List<String> startCluster(int numberOfNodes, int minimumMasterNode, @Nullable int[] unicastHostsOrdinals) throws
         ExecutionException, InterruptedException {
         configureCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
-        List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
+        List<String> nodes = internalCluster().startNodes(numberOfNodes);
         ensureStableCluster(numberOfNodes);
 
         // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }
@@ -201,16 +201,16 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }
 
     static final Settings DEFAULT_SETTINGS = Settings.builder()
         .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly
         .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
         .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
         .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
         .put(TcpTransport.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
             // value and the time of disruption and does not recover immediately
             // when disruption is stop. We should make sure we recover faster
             // then the default of 30s, causing ensureGreen and friends to time out
 
         .build();
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -237,10 +237,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         logger.info("---> configured unicast");
         // TODO: Rarely use default settings form some of these
         Settings nodeSettings = Settings.builder()
             .put(settings)
             .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numberOfNodes)
             .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode)
             .build();
 
         if (discoveryConfig == null) {
             if (unicastHostsOrdinals == null) {
@@ -306,8 +306,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         logger.info("--> reducing min master nodes to 2");
         assertAcked(client().admin().cluster().prepareUpdateSettings()
             .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2))
             .get());
 
         String master = internalCluster().getMasterName();
         String nonMaster = null;
@@ -334,8 +334,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         // Makes sure that the get request can be executed on each node locally:
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
         ));
 
         // Everything is stable now, it is now time to simulate evil...
@@ -376,7 +376,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             }
             if (!success) {
                 fail("node [" + node + "] has no master or has blocks, despite of being on the right side of the partition. State dump:\n"
                     + nodeState);
             }
         }
 
@@ -388,8 +388,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all");
         client().admin().cluster().prepareUpdateSettings()
             .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all"))
             .get();
 
         networkDisruption.startDisrupting();
 
@@ -416,10 +416,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         final List<String> nodes = startCluster(3);
 
         assertAcked(prepareCreate("test")
             .setSettings(Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
             ));
 
         ensureGreen();
         String isolatedNode = internalCluster().getMasterName();
@@ -440,7 +440,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         for (String node : nodes) {
             ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkDisruption.expectedTimeToHeal().millis()),
                 true, node);
         }
 
         logger.info("issue a reroute");
@@ -468,8 +468,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                 }
             } catch (AssertionError t) {
                 fail("failed comparing cluster state: " + t.getMessage() + "\n" +
                     "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state +
                     "\n--- cluster state [" + node + "]: ---\n" + nodeState);
             }
 
         }
@@ -482,7 +482,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates
      */
     @TestLogging("_root:DEBUG,org.elasticsearch.action.index:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE,org.elasticsearch.cluster.service:TRACE,"
         + "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.indices.cluster:TRACE")
     public void testAckedIndexing() throws Exception {
 
         final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5;
@@ -491,10 +491,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         final List<String> nodes = startCluster(rarely() ? 5 : 3);
 
         assertAcked(prepareCreate("test")
             .setSettings(Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
             ));
         ensureGreen();
 
         ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme();
@@ -530,7 +530,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                         int shard = Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries);
                         logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
                         IndexResponse response =
                             client.prepareIndex("test", "type", id).setSource("{}").setTimeout(timeout).get(timeout);
                         assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
                         ackedDocs.put(id, node);
                         logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
@@ -584,7 +584,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             disruptionScheme.stopDisrupting();
             for (String node : internalCluster().getNodeNames()) {
                 ensureStableCluster(nodes.size(), TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() +
                     DISRUPTION_HEALING_OVERHEAD.millis()), true, node);
             }
             ensureGreen("test");
 
@@ -594,7 +594,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                 logger.debug("validating through node [{}] ([{}] acked docs)", node, ackedDocs.size());
                 for (String id : ackedDocs.keySet()) {
                     assertTrue("doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found",
                         client(node).prepareGet("test", "type", id).setPreference("_local").get().isExists());
                 }
             } catch (AssertionError e) {
                 throw new AssertionError(e.getMessage() + " (checked via node [" + node + "]", e);
@@ -684,7 +684,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         // Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
         final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<String, List<Tuple<String,
             String>>>());
         for (final String node : majoritySide) {
             masters.put(node, new ArrayList<Tuple<String, String>>());
             internalCluster().getInstance(ClusterService.class, node).add(new ClusterStateListener() {
@@ -694,7 +694,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                     DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
                     if (!Objects.equals(previousMaster, currentMaster)) {
                         logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(),
                             event.previousState());
                         String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
                         String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
                         masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
@@ -739,17 +739,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         // The old master node will send this update + the cluster state where he is flagged as master to the other
         // nodes that follow the new master. These nodes should ignore this update.
         internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new
             ClusterStateUpdateTask(Priority.IMMEDIATE) {
             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
                 return ClusterState.builder(currentState).build();
             }
 
             @Override
             public void onFailure(String source, Exception e) {
                 logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure [{}]", source), e);
             }
         });
 
         // Save the new elected master node
         final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
@@ -769,15 +769,15 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             String nodeName = entry.getKey();
             List<Tuple<String, String>> recordedMasterTransition = entry.getValue();
             assertThat("[" + nodeName + "] Each node should only record two master node transitions", recordedMasterTransition.size(),
                 equalTo(2));
             assertThat("[" + nodeName + "] First transition's previous master should be [null]", recordedMasterTransition.get(0).v1(),
                 equalTo(oldMasterNode));
             assertThat("[" + nodeName + "] First transition's current master should be [" + newMasterNode + "]", recordedMasterTransition
                 .get(0).v2(), nullValue());
             assertThat("[" + nodeName + "] Second transition's previous master should be [null]", recordedMasterTransition.get(1).v1(),
                 nullValue());
             assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]",
                 recordedMasterTransition.get(1).v2(), equalTo(newMasterNode));
         }
     }
 
@@ -789,11 +789,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         List<String> nodes = startCluster(3);
 
         assertAcked(prepareCreate("test")
             .setSettings(Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
             )
             .get());
         ensureGreen("test");
 
         nodes = new ArrayList<>(nodes);
@@ -809,13 +809,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
 
         IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value")
             .get();
         assertThat(indexResponse.getVersion(), equalTo(1L));
 
         logger.info("Verifying if document exists via node[{}]", notIsolatedNode);
         GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
             .setPreference("_local")
             .get();
         assertThat(getResponse.isExists(), is(true));
         assertThat(getResponse.getVersion(), equalTo(1L));
         assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
@@ -828,8 +828,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         for (String node : nodes) {
             logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node);
             getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId())
                 .setPreference("_local")
                 .get();
             assertThat(getResponse.isExists(), is(true));
             assertThat(getResponse.getVersion(), equalTo(1L));
             assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
@@ -853,7 +853,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
         // includes all the other nodes that have pinged it and the issue doesn't manifest
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }
@@ -890,7 +890,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
         // includes all the other nodes that have pinged it and the issue doesn't manifest
-        ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing();
+        ZenPing zenPing = ((TestZenDiscovery) internalCluster().getInstance(Discovery.class)).getZenPing();
         if (zenPing instanceof UnicastZenPing) {
             ((UnicastZenPing) zenPing).clearTemporalResponses();
         }
@@ -928,11 +928,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes();
 
         TransportService masterTranspotService =
             internalCluster().getInstance(TransportService.class, discoveryNodes.getMasterNode().getName());
 
         logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode);
         MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class,
             nonMasterNode);
         nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService);
 
         assertNoMaster(nonMasterNode);
@@ -951,10 +951,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         logger.info("allowing requests from non master [{}] to master [{}], waiting for two join request", nonMasterNode, masterNode);
         final CountDownLatch countDownLatch = new CountDownLatch(2);
         nonMasterTransportService.addDelegate(masterTranspotService, new MockTransportService.DelegateTransport(nonMasterTransportService
             .original()) {
             @Override
             public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions
                 options) throws IOException, TransportException {
                 if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
                     countDownLatch.countDown();
                 }
@@ -982,16 +982,16 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         List<String> nonMasterNodes = nodes.stream().filter(node -> !node.equals(masterNode)).collect(Collectors.toList());
         String nonMasterNode = randomFrom(nonMasterNodes);
         assertAcked(prepareCreate("test")
             .setSettings(Settings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
             ));
         ensureGreen();
         String nonMasterNodeId = internalCluster().clusterService(nonMasterNode).localNode().getId();
 
         // fail a random shard
         ShardRouting failedShard =
             randomFrom(clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED));
         ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode);
         CountDownLatch latch = new CountDownLatch(1);
         AtomicBoolean success = new AtomicBoolean();
@@ -1006,20 +1006,20 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         networkDisruption.startDisrupting();
 
         service.localShardFailed(failedShard, "simulated", new CorruptIndexException("simulated", (String) null), new
             ShardStateAction.Listener() {
             @Override
             public void onSuccess() {
                 success.set(true);
                 latch.countDown();
             }
 
             @Override
             public void onFailure(Exception e) {
                 success.set(false);
                 latch.countDown();
                 assert false;
             }
         });
 
         if (isolatedNode.equals(nonMasterNode)) {
             assertNoMaster(nonMasterNode);
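Note: the @@ -951 hunk above captures the test's interception idiom: wrap the non-master node's transport in a MockTransportService delegate, watch outbound actions, and count down a latch on each discovery join request so the test can await two join attempts deterministically. A compressed sketch of just that pattern (the forwarding step is assumed; the hunk is cut off before it):

    final CountDownLatch joinRequests = new CountDownLatch(2);
    // inside the DelegateTransport's sendRequest(...) override:
    //     if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
    //         joinRequests.countDown(); // one join attempt observed
    //     }
    //     ... then forward the request to the wrapped transport ...
    joinRequests.await(); // resumes once two join requests have been seen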
@@ -1051,11 +1051,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000);
 
         // don't wait for initial state, wat want to add the disruption while the cluster is forming..
-        internalCluster().startNodesAsync(3,
-            Settings.builder()
-                .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "1ms")
-                .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s")
-                .build()).get();
+        internalCluster().startNodes(3, Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s").build());
 
         logger.info("applying disruption while cluster is forming ...");
 
@@ -1084,7 +1080,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         logger.info("blocking request from master [{}] to [{}]", masterNode, nonMasterNode);
         MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class,
             masterNode);
         if (randomBoolean()) {
             masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonMasterNode));
         } else {
@@ -1110,21 +1106,18 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
         // don't use DEFAULT settings (which can cause node disconnects on a slow CI machine)
         configureCluster(Settings.EMPTY, 3, null, 1);
-        InternalTestCluster.Async<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync();
-        InternalTestCluster.Async<String> node_1Future = internalCluster().startDataOnlyNodeAsync();
+        final String masterNode = internalCluster().startMasterOnlyNode();
+        final String node_1 = internalCluster().startDataOnlyNode();
 
-        final String node_1 = node_1Future.get();
-        final String masterNode = masterNodeFuture.get();
         logger.info("--> creating index [test] with one shard and on replica");
         assertAcked(prepareCreate("test").setSettings(
             Settings.builder().put(indexSettings())
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
         );
         ensureGreen("test");
 
-        InternalTestCluster.Async<String> node_2Future = internalCluster().startDataOnlyNodeAsync();
-        final String node_2 = node_2Future.get();
+        final String node_2 = internalCluster().startDataOnlyNode();
         List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
         for (int i = 0; i < 100; i++) {
             indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("doc").setSource("{\"int_field\":1}"));
@@ -1137,7 +1130,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         CountDownLatch beginRelocationLatch = new CountDownLatch(1);
         CountDownLatch endRelocationLatch = new CountDownLatch(1);
         transportServiceNode2.addTracer(new IndicesStoreIntegrationIT.ReclocationStartEndTracer(logger, beginRelocationLatch,
             endRelocationLatch));
         internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
         // wait for relocation to start
         beginRelocationLatch.await();
@@ -1176,21 +1169,19 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
      */
     public void testIndicesDeleted() throws Exception {
         final Settings settings = Settings.builder()
             .put(DEFAULT_SETTINGS)
             .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node
             .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed
             .build();
         final String idxName = "test";
         configureCluster(settings, 3, null, 2);
-        InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
-        InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
-        dataNode.get();
-        final List<String> allMasterEligibleNodes = masterNodes.get();
+        final List<String> allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(2);
+        final String dataNode = internalCluster().startDataOnlyNode();
         ensureStableCluster(3);
         assertAcked(prepareCreate("test"));
 
         final String masterNode1 = internalCluster().getMasterName();
-        NetworkDisruption networkDisruption = new NetworkDisruption(new TwoPartitions(masterNode1, dataNode.get()),
+        NetworkDisruption networkDisruption = new NetworkDisruption(new TwoPartitions(masterNode1, dataNode),
             new NetworkUnresponsive());
         internalCluster().setDisruptionScheme(networkDisruption);
         networkDisruption.startDisrupting();
@@ -1202,7 +1193,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
             for (String masterNode : allMasterEligibleNodes) {
                 final ClusterServiceState masterState = internalCluster().clusterService(masterNode).clusterServiceState();
                 assertTrue("index not deleted on " + masterNode, masterState.getClusterState().metaData().hasIndex(idxName) == false &&
                     masterState.getClusterStateStatus() == ClusterStateStatus.APPLIED);
             }
         });
         internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK);
@@ -1212,21 +1203,21 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
     public void testElectMasterWithLatestVersion() throws Exception {
         configureCluster(3, null, 2);
-        final Set<String> nodes = new HashSet<>(internalCluster().startNodesAsync(3).get());
+        final Set<String> nodes = new HashSet<>(internalCluster().startNodes(3));
         ensureStableCluster(3);
         ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption(new NetworkDisruption.IsolateAllNodes(nodes), new NetworkDisconnect());
         internalCluster().setDisruptionScheme(isolateAllNodes);
 
         logger.info("--> forcing a complete election to make sure \"preferred\" master is elected");
         isolateAllNodes.startDisrupting();
-        for (String node: nodes) {
+        for (String node : nodes) {
             assertNoMaster(node);
         }
-        isolateAllNodes.stopDisrupting();
+        internalCluster().clearDisruptionScheme();
         ensureStableCluster(3);
         final String preferredMasterName = internalCluster().getMasterName();
         final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode();
-        for (String node: nodes) {
+        for (String node : nodes) {
             DiscoveryNode discoveryNode = internalCluster().clusterService(node).localNode();
             assertThat(discoveryNode.getId(), greaterThanOrEqualTo(preferredMaster.getId()));
         }
@@ -1252,7 +1243,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 
         logger.info("--> forcing a complete election again");
         isolateAllNodes.startDisrupting();
-        for (String node: nodes) {
+        for (String node : nodes) {
             assertNoMaster(node);
         }
 
@@ -1298,10 +1289,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         }
         final NetworkLinkDisruptionType disruptionType;
         switch (randomInt(2)) {
-            case 0: disruptionType = new NetworkUnresponsive(); break;
-            case 1: disruptionType = new NetworkDisconnect(); break;
-            case 2: disruptionType = NetworkDelay.random(random()); break;
-            default: throw new IllegalArgumentException();
+            case 0:
+                disruptionType = new NetworkUnresponsive();
+                break;
+            case 1:
+                disruptionType = new NetworkDisconnect();
+                break;
+            case 2:
+                disruptionType = NetworkDelay.random(random());
+                break;
+            default:
+                throw new IllegalArgumentException();
         }
         final ServiceDisruptionScheme scheme;
         if (rarely()) {
@@ -1334,7 +1332,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                 if (expectedBlocks != null) {
                     for (ClusterBlockLevel level : expectedBlocks.levels()) {
                         assertTrue("node [" + node + "] does have level [" + level + "] in it's blocks", state.getBlocks().hasGlobalBlock
                             (level));
                     }
                 }
             }
@@ -1352,7 +1350,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                 }
                 logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode());
                 assertThat("node [" + node + "] still has [" + masterNode + "] as master",
                     oldMasterNode, not(equalTo(masterNode)));
             }
         }, 10, TimeUnit.SECONDS);
     }
@@ -1372,12 +1370,12 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     private void assertDiscoveryCompleted(List<String> nodes) throws InterruptedException {
         for (final String node : nodes) {
             assertTrue(
                 "node [" + node + "] is still joining master",
                 awaitBusy(
                     () -> !((ZenDiscovery) internalCluster().getInstance(Discovery.class, node)).joiningCluster(),
                     30,
                     TimeUnit.SECONDS
                 )
             );
         }
     }
@@ -1,102 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.discovery;
-
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.ESIntegTestCase.Scope;
-import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
-import org.junit.Before;
-
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import static org.hamcrest.Matchers.equalTo;
-
-@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false)
-public class ZenUnicastDiscoveryIT extends ESIntegTestCase {
-
-    private ClusterDiscoveryConfiguration discoveryConfig;
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return discoveryConfig.nodeSettings(nodeOrdinal);
-    }
-
-    @Before
-    public void clearConfig() {
-        discoveryConfig = null;
-    }
-
-    public void testNormalClusterForming() throws ExecutionException, InterruptedException {
-        int currentNumNodes = randomIntBetween(3, 5);
-
-        // use explicit unicast hosts so we can start those first
-        int[] unicastHostOrdinals = new int[randomIntBetween(1, currentNumNodes)];
-        for (int i = 0; i < unicastHostOrdinals.length; i++) {
-            unicastHostOrdinals[i] = i;
-        }
-        discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, unicastHostOrdinals);
-
-        // start the unicast hosts
-        internalCluster().startNodesAsync(unicastHostOrdinals.length).get();
-
-        // start the rest of the cluster
-        internalCluster().startNodesAsync(currentNumNodes - unicastHostOrdinals.length).get();
-
-        if (client().admin().cluster().prepareHealth().setWaitForNodes("" + currentNumNodes).get().isTimedOut()) {
-            logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState());
-            fail("timed out waiting for cluster to form with [" + currentNumNodes + "] nodes");
-        }
-    }
-
-    // Without the 'include temporalResponses responses to nodesToConnect' improvement in UnicastZenPing#sendPings this
-    // test fails, because 2 nodes elect themselves as master and the health request times out b/c waiting_for_nodes=N
-    // can't be satisfied.
-    public void testMinimumMasterNodes() throws Exception {
-        int currentNumNodes = randomIntBetween(3, 5);
-        final int min_master_nodes = currentNumNodes / 2 + 1;
-        int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes);
-        final Settings settings = Settings.builder()
-            .put("discovery.zen.join_timeout", TimeValue.timeValueSeconds(10))
-            .put("discovery.zen.minimum_master_nodes", min_master_nodes)
-            .build();
-        discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings);
-
-        List<String> nodes = internalCluster().startNodesAsync(currentNumNodes).get();
-
-        ensureStableCluster(currentNumNodes);
-
-        DiscoveryNode masterDiscoNode = null;
-        for (String node : nodes) {
-            ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
-            assertThat(state.nodes().getSize(), equalTo(currentNumNodes));
-            if (masterDiscoNode == null) {
-                masterDiscoNode = state.nodes().getMasterNode();
-            } else {
-                assertThat(masterDiscoNode.equals(state.nodes().getMasterNode()), equalTo(true));
-            }
-        }
-    }
-}
@@ -80,12 +80,12 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
             .put(Node.NODE_DATA_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, masterNodeSettings).get();
+        internalCluster().startNodes(2, masterNodeSettings);
         Settings dateNodeSettings = Settings.builder()
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, dateNodeSettings).get();
+        internalCluster().startNodes(2, dateNodeSettings);
         ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth()
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("4")
@@ -100,13 +100,10 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
 
         final String oldMaster = internalCluster().getMasterName();
         internalCluster().stopCurrentMasterNode();
-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
-                String current = internalCluster().getMasterName();
-                assertThat(current, notNullValue());
-                assertThat(current, not(equalTo(oldMaster)));
-            }
+        assertBusy(() -> {
+            String current = internalCluster().getMasterName();
+            assertThat(current, notNullValue());
+            assertThat(current, not(equalTo(oldMaster)));
         });
         ensureSearchable("test");
 
@@ -130,7 +127,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(defaultSettings)
             .build();
-        internalCluster().startNodesAsync(2, dateNodeSettings).get();
+        internalCluster().startNodes(2, dateNodeSettings);
         client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
 
         ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
@@ -155,8 +152,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
     }
 
     public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception {
-        List<String> nodeNames = internalCluster().startNodesAsync(2).get();
-        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
+        List<String> nodeNames = internalCluster().startNodes(2);
 
         List<String> nonMasterNodes = new ArrayList<>(nodeNames);
         nonMasterNodes.remove(internalCluster().getMasterName());
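Note: the @@ -100 hunk above is a mechanical anonymous-class-to-lambda conversion; assertBusy retries its argument until the assertions stop throwing, and the lambda form keeps the body unchanged. Before/after, condensed from the hunk:

    // before:
    //     assertBusy(new Runnable() {
    //         @Override
    //         public void run() { /* assertions */ }
    //     });
    // after:
    //     assertBusy(() -> { /* assertions */ });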
@@ -20,10 +20,12 @@
 package org.elasticsearch.fieldstats;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.action.fieldstats.FieldStatsResponse;
 import org.elasticsearch.action.fieldstats.IndexConstraint;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.DateFieldMapper;
@@ -46,7 +48,6 @@ import static org.elasticsearch.action.fieldstats.IndexConstraint.Comparison.LTE
 import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MAX;
 import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MIN;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
 
 public class FieldStatsTests extends ESSingleNodeTestCase {
     public void testByte() {
@@ -73,83 +74,157 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         testNumberRange("field1", "long", -312321312312422L, -312321312312412L);
     }
 
+    private static String makeType(String type, boolean indexed, boolean docValues, boolean stored) {
+        return new StringBuilder()
+            .append("type=").append(type)
+            .append(",index=").append(indexed)
+            .append(",doc_values=").append(docValues)
+            .append(",store=").append(stored).toString();
+    }
 
     public void testString() {
-        createIndex("test", Settings.EMPTY, "test", "field", "type=text");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("keyword", true, false, false),
+            "field_dv", makeType("keyword", false, true, false),
+            "field_stored", makeType("keyword", false, true, true),
+            "field_source", makeType("keyword", false, false, false));
         for (int value = 0; value <= 10; value++) {
-            client().prepareIndex("test", "test").setSource("field",
-                String.format(Locale.ENGLISH, "%03d", value)).get();
+            String keyword = String.format(Locale.ENGLISH, "%03d", value);
+            client().prepareIndex("test", "test")
+                .setSource("field_index", keyword,
+                    "field_dv", keyword,
+                    "field_stored", keyword,
+                    "field_source", keyword).get();
         }
         client().admin().indices().prepareRefresh().get();
 
-        FieldStatsResponse result = client().prepareFieldStats().setFields("field").get();
-        assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get("field").getMinValue(),
-            equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 0))));
-        assertThat(result.getAllFieldStats().get("field").getMaxValue(),
-            equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 10))));
-        assertThat(result.getAllFieldStats().get("field").getMinValueAsString(),
-            equalTo(String.format(Locale.ENGLISH, "%03d", 0)));
-        assertThat(result.getAllFieldStats().get("field").getMaxValueAsString(),
-            equalTo(String.format(Locale.ENGLISH, "%03d", 10)));
-        assertThat(result.getAllFieldStats().get("field").getDisplayType(),
-            equalTo("string"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        assertEquals(result.getAllFieldStats().size(), 3);
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(),
+                "string");
+            if ("field_index".equals(field)) {
+                assertEquals(stats.getMinValue(),
+                    new BytesRef(String.format(Locale.ENGLISH, "%03d", 0)));
+                assertEquals(stats.getMaxValue(),
+                    new BytesRef(String.format(Locale.ENGLISH, "%03d", 10)));
+                assertEquals(stats.getMinValueAsString(),
+                    String.format(Locale.ENGLISH, "%03d", 0));
+                assertEquals(stats.getMaxValueAsString(),
+                    String.format(Locale.ENGLISH, "%03d", 10));
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }
 
     public void testDouble() {
-        String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=double");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("double", true, false, false),
+            "field_dv", makeType("double", false, true, false),
+            "field_stored", makeType("double", false, true, true),
+            "field_source", makeType("double", false, false, false));
         for (double value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
        }
         client().admin().indices().prepareRefresh().get();
-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Double.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if ("field_index".equals(field)) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Double.toString(-1));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }
 
     public void testHalfFloat() {
-        String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=half_float");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("half_float", true, false, false),
+            "field_dv", makeType("half_float", false, true, false),
+            "field_stored", makeType("half_float", false, true, true),
+            "field_source", makeType("half_float", false, false, false));
         for (float value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
         }
         client().admin().indices().prepareRefresh().get();
 
-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Float.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(), equalTo(Float.toString(9)));
-        assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[] {"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if (field.equals("field_index")) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Float.toString(-1));
+                assertEquals(stats.getMaxValueAsString(), Float.toString(9));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }
 
     public void testFloat() {
         String fieldName = "field";
-        createIndex("test", Settings.EMPTY, "test", fieldName, "type=float");
+        createIndex("test", Settings.EMPTY, "test",
+            "field_index", makeType("float", true, false, false),
+            "field_dv", makeType("float", false, true, false),
+            "field_stored", makeType("float", false, true, true),
+            "field_source", makeType("float", false, false, false));
         for (float value = -1; value <= 9; value++) {
-            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+            client().prepareIndex("test", "test")
+                .setSource("field_index", value, "field_dv", value, "field_stored", value, "field_source", value).get();
         }
         client().admin().indices().prepareRefresh().get();
 
-        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Float.toString(-1)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(), equalTo(Float.toString(9)));
+        FieldStatsResponse result = client().prepareFieldStats()
+            .setFields("field_index", "field_dv", "field_stored", "field_source").get();
+        for (String field : new String[]{"field_index", "field_dv", "field_stored"}) {
+            FieldStats stats = result.getAllFieldStats().get(field);
+            assertEquals(stats.getMaxDoc(), 11L);
+            assertEquals(stats.getDisplayType(), "float");
+            if (field.equals("field_index")) {
+                assertEquals(stats.getDocCount(), 11L);
+                assertEquals(stats.getDensity(), 100);
+                assertEquals(stats.getMinValue(), -1d);
+                assertEquals(stats.getMaxValue(), 9d);
+                assertEquals(stats.getMinValueAsString(), Float.toString(-1));
+                assertEquals(stats.getMaxValueAsString(), Float.toString(9));
+            } else {
+                assertEquals(stats.getDocCount(), 0L);
+                assertNull(stats.getMinValue());
+                assertNull(stats.getMaxValue());
+                assertEquals(stats.getDensity(), 0);
+            }
+        }
     }
 
     private void testNumberRange(String fieldName, String fieldType, long min, long max) {
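For readers skimming the new makeType helper introduced above: it merely assembles the shorthand mapping string that createIndex accepts, so a call such as

    makeType("keyword", true, false, false)

evaluates to "type=keyword,index=true,doc_values=false,store=false", i.e. an indexed keyword field without doc values or stored values. The four field_index/field_dv/field_stored/field_source variants created per test differ only in which of those flags is switched on.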
@@ -166,21 +241,21 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
 
         FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
         long numDocs = max - min + 1;
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(numDocs));
-        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(numDocs));
-        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(min));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(max));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(),
-            equalTo(java.lang.Long.toString(min)));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(),
-            equalTo(java.lang.Long.toString(max)));
-        assertThat(result.getAllFieldStats().get(fieldName).isSearchable(), equalTo(true));
-        assertThat(result.getAllFieldStats().get(fieldName).isAggregatable(), equalTo(true));
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxDoc(), numDocs);
+        assertEquals(result.getAllFieldStats().get(fieldName).getDocCount(), numDocs);
+        assertEquals(result.getAllFieldStats().get(fieldName).getDensity(), 100);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMinValue(), min);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxValue(), max);
+        assertEquals(result.getAllFieldStats().get(fieldName).getMinValueAsString(),
+            java.lang.Long.toString(min));
+        assertEquals(result.getAllFieldStats().get(fieldName).getMaxValueAsString(),
+            java.lang.Long.toString(max));
+        assertEquals(result.getAllFieldStats().get(fieldName).isSearchable(), true);
+        assertEquals(result.getAllFieldStats().get(fieldName).isAggregatable(), true);
         if (fieldType.equals("float") || fieldType.equals("double") || fieldType.equals("half-float")) {
-            assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("float"));
+            assertEquals(result.getAllFieldStats().get(fieldName).getDisplayType(), "float");
         } else {
-            assertThat(result.getAllFieldStats().get(fieldName).getDisplayType(), equalTo("integer"));
+            assertEquals(result.getAllFieldStats().get(fieldName).getDisplayType(), "integer");
         }
 
         client().admin().indices().prepareDelete("test").get();
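One small observation on the assertThat-to-assertEquals conversion in this hunk: the rewritten calls pass the actual value first, while JUnit documents the parameter order as assertEquals(expected, actual). For pure equality checks the outcome is identical either way; only the labels in a failure message are swapped:

    assertEquals(result.getAllFieldStats().get(fieldName).getMaxDoc(), numDocs); // order as written in this diff
    assertEquals(numDocs, result.getAllFieldStats().get(fieldName).getMaxDoc()); // JUnit's documented order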
@@ -193,18 +268,19 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
         stats.add(new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L));
+        stats.add(new FieldStats.Long(0, 0, 0, 0, false, false));
 
         FieldStats stat = new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L);
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(4L));
-        assertThat(stat.getSumDocFreq(), equalTo(4L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(4L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(false));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), 4L);
+        assertEquals(stat.getSumDocFreq(), 4L);
+        assertEquals(stat.getSumTotalTermFreq(), 4L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), false);
+        assertEquals(stat.getDisplayType(), "integer");
     }
 
     public void testMerge_notAvailable() {
@@ -217,26 +293,28 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(-1L));
-        assertThat(stat.getSumDocFreq(), equalTo(-1L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(true));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), -1L);
+        assertEquals(stat.getSumDocFreq(), -1L);
+        assertEquals(stat.getSumTotalTermFreq(), -1L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), true);
+        assertEquals(stat.getDisplayType(), "integer");
 
-        stats.add(new FieldStats.Long(1, -1L, -1L, -1L, true, true, 1L, 1L));
+        stats.add(new FieldStats.Long(1, -1L, -1L, -1L, false, true));
         stat = stats.remove(0);
         for (FieldStats otherStat : stats) {
             stat.accumulate(otherStat);
         }
-        assertThat(stat.getMaxDoc(), equalTo(4L));
-        assertThat(stat.getDocCount(), equalTo(-1L));
-        assertThat(stat.getSumDocFreq(), equalTo(-1L));
-        assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
-        assertThat(stat.isSearchable(), equalTo(true));
-        assertThat(stat.isAggregatable(), equalTo(true));
-        assertThat(stat.getDisplayType(), equalTo("integer"));
+        assertEquals(stat.getMaxDoc(), 4L);
+        assertEquals(stat.getDocCount(), -1L);
+        assertEquals(stat.getSumDocFreq(), -1L);
+        assertEquals(stat.getSumTotalTermFreq(), -1L);
+        assertEquals(stat.isSearchable(), true);
+        assertEquals(stat.isAggregatable(), true);
+        assertEquals(stat.getDisplayType(), "integer");
+        assertNull(stat.getMaxValue());
+        assertNull(stat.getMinValue());
     }
 
     public void testNumberFiltering() {
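The two merge hunks above exercise the new FieldStats.Long overload that carries no min/max. Condensed to its essentials, the behaviour the added assertions encode seems to be that accumulating a min/max-less instance clears min/max on the result (constructor signatures taken from this diff):

    FieldStats stat = new FieldStats.Long(1, 1L, 1L, 1L, true, false, 1L, 1L);  // has min/max
    stat.accumulate(new FieldStats.Long(1, -1L, -1L, -1L, false, true));        // lacks min/max
    assertNull(stat.getMinValue());
    assertNull(stat.getMaxValue());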
@@ -250,9 +328,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             .setFields("value")
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -260,7 +338,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "0"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -268,7 +346,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LT, "1"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -276,8 +354,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "1"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -285,8 +363,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -294,7 +372,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -302,8 +380,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -311,8 +389,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "4"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -320,7 +398,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "4"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -328,9 +406,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), 1L);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), 3L);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -338,7 +416,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LT, "3"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
     }
 
     public void testDateFiltering() {
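All testNumberFiltering hunks above share a single request shape; only the constraint bounds and the expected per-index hits vary. A representative call reassembled from the fragments visible in these hunks (note that setIndexContraints really is the method's spelling in this codebase; the MIN/GTE half of each constraint pair sits just above each hunk's visible window, so the lower bound here is illustrative):

    FieldStatsResponse response = client().prepareFieldStats()
        .setFields("value")
        .setIndexContraints(new IndexConstraint("value", MIN, GTE, "1"),
            new IndexConstraint("value", MAX, LTE, "3"))
        .setLevel("indices")
        .get();
    assertEquals(response.getIndicesMergedFieldStats().size(), 2);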
@@ -347,8 +425,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         DateTime dateTime2 = new DateTime(2014, 1, 2, 0, 0, 0, 0, DateTimeZone.UTC);
         String dateTime2Str = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().print(dateTime2);
 
-        createIndex("test1", Settings.EMPTY, "type", "value", "type=date");
-        client().prepareIndex("test1", "test").setSource("value", dateTime1Str).get();
+        createIndex("test1", Settings.EMPTY, "type", "value", "type=date", "value2", "type=date,index=false");
+        client().prepareIndex("test1", "test")
+            .setSource("value", dateTime1Str, "value2", dateTime1Str).get();
         createIndex("test2", Settings.EMPTY, "type", "value", "type=date");
         client().prepareIndex("test2", "test").setSource("value", dateTime2Str).get();
         client().admin().indices().prepareRefresh().get();
@@ -357,17 +436,17 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             .setFields("value")
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
-            equalTo(dateTime1.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
-            equalTo(dateTime2.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
-            equalTo(dateTime1Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo(dateTime2Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
+            dateTime1.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
+            dateTime2.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
+            dateTime1Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            dateTime2Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
+            "date");
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -375,7 +454,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2013-12-31T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -383,13 +462,13 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2014-01-01T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
-            equalTo(dateTime1.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
-            equalTo(dateTime1Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
+            dateTime1.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
+            dateTime1Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getDisplayType(),
+            "date");
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -397,11 +476,11 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2014-01-02T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
-            equalTo(dateTime2.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo(dateTime2Str));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
+            dateTime2.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            dateTime2Str);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -409,7 +488,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2014-01-03T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
 
         response = client().prepareFieldStats()
             .setFields("value")
@@ -417,47 +496,53 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, "2014-01-02T01:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
-            equalTo(dateTime2.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo(dateTime2Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
+            dateTime2.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            dateTime2Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
+            "date");
 
         response = client().prepareFieldStats()
             .setFields("value")
             .setIndexContraints(new IndexConstraint("value", MIN, GTE, "2014-01-01T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
-            equalTo(dateTime1.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
-            equalTo(dateTime2.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
-            equalTo(dateTime1Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo(dateTime2Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
+            dateTime1.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
+            dateTime2.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
+            dateTime1Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            dateTime2Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
+            "date");
 
         response = client().prepareFieldStats()
             .setFields("value")
             .setIndexContraints(new IndexConstraint("value", MAX, LTE, "2014-01-02T00:00:00.000Z"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
-            equalTo(dateTime1.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
-            equalTo(dateTime2.getMillis()));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
-            equalTo(dateTime1Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo(dateTime2Str));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 2);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(),
+            dateTime1.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(),
+            dateTime2.getMillis());
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValueAsString(),
+            dateTime1Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            dateTime2Str);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(), "date");
+
+        response = client().prepareFieldStats()
+            .setFields("value2")
+            .setIndexContraints(new IndexConstraint("value2", MAX, LTE, "2014-01-02T00:00:00.000Z"))
+            .setLevel("indices")
+            .get();
+        assertEquals(response.getIndicesMergedFieldStats().size(), 0);
     }
 
     public void testDateFiltering_optionalFormat() {
@@ -476,11 +561,11 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
                 new IndexConstraint("value", MAX, LTE, String.valueOf(dateTime2.getMillis()), "epoch_millis"))
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
-            equalTo("2014-01-02T00:00:00.000Z"));
-        assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
-            equalTo("date"));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValueAsString(),
+            "2014-01-02T00:00:00.000Z");
+        assertEquals(response.getIndicesMergedFieldStats().get("test2").get("value").getDisplayType(),
+            "date");
 
         try {
             client().prepareFieldStats()
@@ -501,8 +586,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
             .setFields("*")
             .setLevel("indices")
             .get();
-        assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
-        assertThat(response.getIndicesMergedFieldStats().get("test1").size(), equalTo(0));
+        assertEquals(response.getIndicesMergedFieldStats().size(), 1);
+        assertEquals(response.getIndicesMergedFieldStats().get("test1").size(), 0);
     }
 
     public void testMetaFieldsNotIndexed() {
@@ -513,56 +598,91 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         FieldStatsResponse response = client().prepareFieldStats()
             .setFields("_id", "_type")
             .get();
-        assertThat(response.getAllFieldStats().size(), equalTo(1));
-        assertThat(response.getAllFieldStats().get("_type").isSearchable(), equalTo(true));
-        assertThat(response.getAllFieldStats().get("_type").isAggregatable(), equalTo(true));
+        assertEquals(response.getAllFieldStats().size(), 1);
+        assertEquals(response.getAllFieldStats().get("_type").isSearchable(), true);
+        assertEquals(response.getAllFieldStats().get("_type").isAggregatable(), true);
     }
 
     public void testSerialization() throws IOException {
-        for (int i = 0; i < 20; i++) {
-            assertSerialization(randomFieldStats());
+        for (Version version : new Version[] {Version.CURRENT, Version.V_5_0_1}){
+            for (int i = 0; i < 20; i++) {
+                assertSerialization(randomFieldStats(version.onOrAfter(Version.V_5_2_0_UNRELEASED)), version);
+            }
         }
     }
 
     /**
      * creates a random field stats which does not guarantee that {@link FieldStats#maxValue} is greater than {@link FieldStats#minValue}
     **/
-    private FieldStats randomFieldStats() throws UnknownHostException {
+    private FieldStats randomFieldStats(boolean withNullMinMax) throws UnknownHostException {
         int type = randomInt(5);
         switch (type) {
             case 0:
-                return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong());
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong());
+                }
             case 1:
-                return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble());
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble());
+                }
             case 2:
-                return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"),
-                    new Date().getTime(), new Date().getTime());
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"),
+                        new Date().getTime(), new Date().getTime());
+                }
             case 3:
-                return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(),
-                    new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20)));
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(),
+                        new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20)));
+                }
             case 4:
-                return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(),
-                    InetAddress.getByName("::1"), InetAddress.getByName("::1"));
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(),
+                        InetAddress.getByName("::1"), InetAddress.getByName("::1"));
+                }
             case 5:
-                return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                    randomPositiveLong(), randomBoolean(), randomBoolean(),
-                    InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4"));
+                if (withNullMinMax && randomBoolean()) {
+                    return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean());
+                } else {
+                    return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                        randomPositiveLong(), randomBoolean(), randomBoolean(),
+                        InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4"));
+                }
             default:
                 throw new IllegalArgumentException("Invalid type");
         }
     }
 
-    private void assertSerialization(FieldStats stats) throws IOException {
+    private void assertSerialization(FieldStats stats, Version version) throws IOException {
         BytesStreamOutput output = new BytesStreamOutput();
+        output.setVersion(version);
         stats.writeTo(output);
         output.flush();
-        FieldStats deserializedStats = FieldStats.readFrom(output.bytes().streamInput());
-        assertThat(stats, equalTo(deserializedStats));
-        assertThat(stats.hashCode(), equalTo(deserializedStats.hashCode()));
+        StreamInput input = output.bytes().streamInput();
+        input.setVersion(version);
+        FieldStats deserializedStats = FieldStats.readFrom(input);
+        assertEquals(stats, deserializedStats);
+        assertEquals(stats.hashCode(), deserializedStats.hashCode());
     }
 }
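The reworked serialization test above pins the same wire version on both ends of the round trip. Reduced to the calls shown in the hunk:

    BytesStreamOutput output = new BytesStreamOutput();
    output.setVersion(version);                 // write as that version would
    stats.writeTo(output);
    output.flush();
    StreamInput input = output.bytes().streamInput();
    input.setVersion(version);                  // read back under the same version
    FieldStats deserializedStats = FieldStats.readFrom(input);
    assertEquals(stats, deserializedStats);

Passing withNullMinMax only when version.onOrAfter(Version.V_5_2_0_UNRELEASED) presumably keeps the randomized stats representable on a pre-5.2 stream, which cannot carry the min/max-less form.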
@@ -94,7 +94,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
 
     public void testSimpleOpenClose() throws Exception {
         logger.info("--> starting 2 nodes");
-        internalCluster().startNodesAsync(2).get();
+        internalCluster().startNodes(2);
 
         logger.info("--> creating test index");
         createIndex("test");
@@ -237,7 +237,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
         logger.info("--> cleaning nodes");
 
         logger.info("--> starting 2 nodes");
-        internalCluster().startNodesAsync(2).get();
+        internalCluster().startNodes(2);
 
         logger.info("--> indexing a simple document");
         client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
@@ -277,7 +277,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
     public void testDanglingIndices() throws Exception {
         logger.info("--> starting two nodes");
 
-        final String node_1 = internalCluster().startNodesAsync(2).get().get(0);
+        final String node_1 = internalCluster().startNodes(2).get(0);
 
         logger.info("--> indexing a simple document");
         client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
@@ -331,7 +331,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
         if (randomBoolean()) {
             // test with a regular index
             logger.info("--> starting a cluster with " + numNodes + " nodes");
-            nodes = internalCluster().startNodesAsync(numNodes).get();
+            nodes = internalCluster().startNodes(numNodes);
             logger.info("--> create an index");
             createIndex(indexName);
         } else {
@@ -344,7 +344,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
                 .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString())
                 .put("index.store.fs.fs_lock", randomFrom("native", "simple"))
                 .build();
-            nodes = internalCluster().startNodesAsync(numNodes, nodeSettings).get();
+            nodes = internalCluster().startNodes(numNodes, nodeSettings);
             logger.info("--> create a shadow replica index");
             createShadowReplicaIndex(indexName, dataPath, numNodes - 1);
         }
@@ -36,6 +36,7 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.LinkedHashMap;
+import java.util.List;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -57,10 +58,9 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
     public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception {
         // this test checks that the index state is removed from a data only node once all shards have been allocated away from it
         String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
-        InternalTestCluster.Async<String> nodeName1 = internalCluster().startDataOnlyNodeAsync();
-        InternalTestCluster.Async<String> nodeName2 = internalCluster().startDataOnlyNodeAsync();
-        String node1 = nodeName1.get();
-        String node2 = nodeName2.get();
+        List<String> nodeNames= internalCluster().startDataOnlyNodes(2);
+        String node1 = nodeNames.get(0);
+        String node2 = nodeNames.get(1);
 
         String index = "index";
         assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1)));
@@ -46,8 +46,7 @@ public class QuorumGatewayIT extends ESIntegTestCase {
     public void testQuorumRecovery() throws Exception {
         logger.info("--> starting 3 nodes");
         // we are shutting down nodes - make sure we don't have 2 clusters if we test network
-        internalCluster().startNodesAsync(3).get();
-
+        internalCluster().startNodes(3);
 
         createIndex("test");
         ensureGreen();
@@ -316,7 +316,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
 
     public void testLatestVersionLoaded() throws Exception {
         // clean two nodes
-        internalCluster().startNodesAsync(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()).get();
+        internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());
 
         client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
         client().admin().indices().prepareFlush().execute().actionGet();
@@ -366,7 +366,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
 
         logger.info("--> starting the two nodes back");
 
-        internalCluster().startNodesAsync(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()).get();
+        internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());
 
         logger.info("--> running cluster_health (wait for the shards to startup)");
         ensureGreen();
@@ -392,7 +392,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
             .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 4)
             .put(MockFSDirectoryService.CRASH_INDEX_SETTING.getKey(), false).build();
 
-        internalCluster().startNodesAsync(4, settings).get();
+        internalCluster().startNodes(4, settings);
         // prevent any rebalance actions during the peer recovery
         // if we run into a relocation the reuse count will be 0 and this fails the test. We are testing here if
         // we reuse the files on disk after full restarts for replicas.
@@ -110,7 +110,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
 
     public void testCannotCreateWithBadPath() throws Exception {
         Settings nodeSettings = nodeSettings("/badpath");
-        internalCluster().startNodesAsync(1, nodeSettings).get();
+        internalCluster().startNodes(1, nodeSettings);
         Settings idxSettings = Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_DATA_PATH, "/etc/foo")

@@ -132,7 +132,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         final Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
 
-        internalCluster().startNodesAsync(3, nodeSettings).get();
+        internalCluster().startNodes(3, nodeSettings);
         Settings idxSettings = Settings.builder()
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();

@@ -189,7 +189,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         final Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
 
-        internalCluster().startNodesAsync(3, nodeSettings).get();
+        internalCluster().startNodes(3, nodeSettings);
         final String IDX = "test";
 
         Settings idxSettings = Settings.builder()

@@ -552,7 +552,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
 
         final int nodeCount = randomIntBetween(2, 5);
         logger.info("--> starting {} nodes", nodeCount);
-        final List<String> nodes = internalCluster().startNodesAsync(nodeCount, nodeSettings).get();
+        final List<String> nodes = internalCluster().startNodes(nodeCount, nodeSettings);
         final String IDX = "test";
         final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount);
         final int numPrimaries = numPrimariesAndReplicas.v1();

@@ -605,7 +605,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
 
-        final List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get();
+        final List<String> nodes = internalCluster().startNodes(2, nodeSettings);
         String IDX = "test";
 
         Settings idxSettings = Settings.builder()

@@ -661,7 +661,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
 
-        internalCluster().startNodesAsync(3, nodeSettings).get();
+        internalCluster().startNodes(3, nodeSettings);
         String IDX = "test";
 
         Settings idxSettings = Settings.builder()

@@ -731,10 +731,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         Settings fooSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "foo").build();
         Settings barSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "bar").build();
 
-        final InternalTestCluster.Async<List<String>> fooNodes = internalCluster().startNodesAsync(2, fooSettings);
-        final InternalTestCluster.Async<List<String>> barNodes = internalCluster().startNodesAsync(2, barSettings);
-        fooNodes.get();
-        barNodes.get();
+        List<String> allNodes = internalCluster().startNodes(fooSettings, fooSettings, barSettings, barSettings);
+        List<String> fooNodes = allNodes.subList(0, 2);
+        List<String> barNodes = allNodes.subList(2, 4);
 
         String IDX = "test";
 
         Settings includeFoo = Settings.builder()

@@ -768,27 +767,27 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get();
 
         // wait for the shards to move from "foo" nodes to "bar" nodes
-        assertNoShardsOn(fooNodes.get());
+        assertNoShardsOn(fooNodes);
 
         // put shards back on "foo"
         client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get();
 
         // wait for the shards to move from "bar" nodes to "foo" nodes
-        assertNoShardsOn(barNodes.get());
+        assertNoShardsOn(barNodes);
 
         // Stop a foo node
         logger.info("--> stopping first 'foo' node");
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(0)));
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(0)));
 
         // Ensure that the other foo node has all the shards now
-        assertShardCountOn(fooNodes.get().get(1), 5);
+        assertShardCountOn(fooNodes.get(1), 5);
 
         // Assert no shards on the "bar" nodes
-        assertNoShardsOn(barNodes.get());
+        assertNoShardsOn(barNodes);
 
         // Stop the second "foo" node
         logger.info("--> stopping second 'foo' node");
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(1)));
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(1)));
 
         // The index should still be able to be allocated (on the "bar" nodes),
         // all the "foo" nodes are gone

@@ -799,7 +798,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         String newFooNode = internalCluster().startNode(fooSettings);
 
         assertShardCountOn(newFooNode, 5);
-        assertNoShardsOn(barNodes.get());
+        assertNoShardsOn(barNodes);
     }
 
     public void testDeletingClosedIndexRemovesFiles() throws Exception {

@@ -808,7 +807,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
 
         final int numNodes = randomIntBetween(2, 5);
         logger.info("--> starting {} nodes", numNodes);
-        final List<String> nodes = internalCluster().startNodesAsync(numNodes, nodeSettings).get();
+        final List<String> nodes = internalCluster().startNodes(numNodes, nodeSettings);
         final String IDX = "test";
         final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(numNodes);
         final int numPrimaries = numPrimariesAndReplicas.v1();

@@ -851,7 +850,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
 
-        internalCluster().startNodesAsync(2, nodeSettings).get();
+        internalCluster().startNodes(2, nodeSettings);
         String IDX = "test";
 
         Settings idxSettings = Settings.builder()

@@ -868,7 +867,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
         flushAndRefresh(IDX);
 
-        internalCluster().startNodesAsync(1).get();
+        internalCluster().startNodes(1);
         ensureYellow(IDX);
 
         final ClusterHealthResponse clusterHealth = client().admin().cluster()
 
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
 import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
 import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
 import org.elasticsearch.index.mapper.TextFieldMapper;

@@ -86,7 +87,8 @@ public class FieldDataCacheTests extends ESTestCase {
     }
 
     private SortedSetDVOrdinalsIndexFieldData createSortedDV(String fieldName, IndexFieldDataCache indexFieldDataCache) {
-        return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService());
+        return new SortedSetDVOrdinalsIndexFieldData(createIndexSettings(), indexFieldDataCache, fieldName, new NoneCircuitBreakerService(),
+            AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION);
     }
 
     private PagedBytesIndexFieldData createPagedBytes(String fieldName, IndexFieldDataCache indexFieldDataCache) {
 
@@ -72,7 +72,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
     }
 
     public void testCorruptTranslogFiles() throws Exception {
-        internalCluster().startNodesAsync(1, Settings.EMPTY).get();
+        internalCluster().startNodes(1, Settings.EMPTY);
 
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", 1)
 
@@ -28,7 +28,6 @@ import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.NativeFSLockFactory;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;

@@ -47,7 +46,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.MockEngineFactoryPlugin;
-import org.elasticsearch.index.translog.TruncateTranslogCommand;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;

@@ -85,7 +83,7 @@ public class TruncateTranslogIT extends ESIntegTestCase {
     }
 
     public void testCorruptTranslogTruncation() throws Exception {
-        internalCluster().startNodesAsync(1, Settings.EMPTY).get();
+        internalCluster().startNodes(1, Settings.EMPTY);
 
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put("index.number_of_shards", 1)
 
@@ -34,7 +34,7 @@ public class DedicatedMasterGetFieldMappingIT extends SimpleGetFieldMappingsIT {
         Settings settings = Settings.builder()
             .put(Node.NODE_DATA_SETTING.getKey(), false)
             .build();
-        internalCluster().startNodesAsync(settings, Settings.EMPTY).get();
+        internalCluster().startNodes(settings, Settings.EMPTY);
     }
 
 }
 
@@ -43,7 +43,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;

@@ -174,7 +173,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
 
     @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932")
     public void testDeleteCreateInOneBulk() throws Exception {
-        internalCluster().startNodesAsync(2).get();
+        internalCluster().startNodes(2);
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
         prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get();
         ensureGreen("test");

@@ -213,7 +212,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
             .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
             .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
             .build();
-        final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
+        final List<String> nodeNames = internalCluster().startNodes(2, settings);
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
 
         final String master = internalCluster().getMasterName();

@@ -328,11 +327,11 @@ public class RareClusterStateIT extends ESIntegTestCase {
         // Here we want to test that everything goes well if the mappings that
         // are needed for a document are not available on the replica at the
         // time of indexing it
-        final List<String> nodeNames = internalCluster().startNodesAsync(2,
+        final List<String> nodeNames = internalCluster().startNodes(2,
             Settings.builder()
                 .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout
                 .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design
-                .build()).get();
+                .build());
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
 
         final String master = internalCluster().getMasterName();
 
@@ -292,17 +292,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
     }
 
     public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
-        InternalTestCluster.Async<String> masterFuture = internalCluster().startNodeAsync(
-            Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), true, Node.NODE_DATA_SETTING.getKey(), false).build());
-        InternalTestCluster.Async<List<String>> nodesFutures = internalCluster().startNodesAsync(4,
-            Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true).build());
-
-        final String masterNode = masterFuture.get();
-        final String node1 = nodesFutures.get().get(0);
-        final String node2 = nodesFutures.get().get(1);
-        final String node3 = nodesFutures.get().get(2);
+        final String masterNode = internalCluster().startMasterOnlyNode();
+        final List<String> nodes = internalCluster().startDataOnlyNodes(4);
+
+        final String node1 = nodes.get(0);
+        final String node2 = nodes.get(1);
+        final String node3 = nodes.get(2);
         // we will use this later on, handy to start now to make sure it has a different data folder than nodes 1, 2 & 3
-        final String node4 = nodesFutures.get().get(3);
+        final String node4 = nodes.get(3);
 
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
             .put(indexSettings())

@@ -356,8 +353,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
 
         logger.debug("--> starting the two old nodes back");
 
-        internalCluster().startNodesAsync(2,
-            Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true).build());
+        internalCluster().startDataOnlyNodes(2);
 
         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());
 

@@ -372,7 +368,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
     }
 
     public void testShardActiveElseWhere() throws Exception {
-        List<String> nodes = internalCluster().startNodesAsync(2).get();
+        List<String> nodes = internalCluster().startNodes(2);
 
         final String masterNode = internalCluster().getMasterName();
         final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);
 
@@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue;
 public class SimpleNodesInfoIT extends ESIntegTestCase {
 
     public void testNodesInfos() throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_1 = nodesIds.get(0);
         final String node_2 = nodesIds.get(1);
 

@@ -79,7 +79,7 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
     }
 
     public void testNodesInfosTotalIndexingBuffer() throws Exception {
-        List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+        List<String> nodesIds = internalCluster().startNodes(2);
         final String node_1 = nodesIds.get(0);
         final String node_2 = nodesIds.get(1);
 

@@ -113,11 +113,10 @@ public class SimpleNodesInfoIT extends ESIntegTestCase {
     }
 
     public void testAllocatedProcessors() throws Exception {
-        List<String> nodesIds = internalCluster().
-                startNodesAsync(
+        List<String> nodesIds = internalCluster().startNodes(
                 Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 3).build(),
                 Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 6).build()
-        ).get();
+        );
 
         final String node_1 = nodesIds.get(0);
         final String node_2 = nodesIds.get(1);
 
@@ -126,7 +126,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
     public void testNoRebalanceOnRollingRestart() throws Exception {
         // see https://github.com/elastic/elasticsearch/issues/14387
         internalCluster().startMasterOnlyNode(Settings.EMPTY);
-        internalCluster().startDataOnlyNodesAsync(3).get();
+        internalCluster().startDataOnlyNodes(3);
         /**
          * We start 3 nodes and a dedicated master. Restart one of the data-nodes and ensure that we got no relocations.
          * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject
 
@@ -470,7 +470,7 @@ public class RelocationIT extends ESIntegTestCase {
             Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes),
             Stream.generate(() -> Settings.builder().put("node.attr.color", "red").build()).limit(halfNodes)
         ).toArray(Settings[]::new);
-        List<String> nodes = internalCluster().startNodesAsync(nodeSettings).get();
+        List<String> nodes = internalCluster().startNodes(nodeSettings);
         String[] blueNodes = nodes.subList(0, halfNodes).stream().toArray(String[]::new);
         String[] redNodes = nodes.subList(halfNodes, nodes.size()).stream().toArray(String[]::new);
         logger.info("blue nodes: {}", (Object)blueNodes);
 
@@ -22,11 +22,46 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Function;
+
 public class IpTermsIT extends AbstractTermsTestCase {
 
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singleton(CustomScriptPlugin.class);
+    }
+
+    public static class CustomScriptPlugin extends AggregationTestScriptsPlugin {
+
+        @Override
+        protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
+            Map<String, Function<Map<String, Object>, Object>> scripts = super.pluginScripts();
+
+            scripts.put("doc['ip'].value", vars -> {
+                Map<?, ?> doc = (Map<?,?>) vars.get("doc");
+                return doc.get("ip");
+            });
+
+            scripts.put("doc['ip'].values", vars -> {
+                Map<?, ?> doc = (Map<?,?>) vars.get("doc");
+                return ((ScriptDocValues<?>) doc.get("ip")).get(0);
+            });
+
+            return scripts;
+        }
+    }
+
     public void testBasics() throws Exception {
         assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
         indexRandom(true,

@@ -51,4 +86,55 @@ public class IpTermsIT extends AbstractTermsTestCase {
         assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
     }
 
+    public void testScriptValue() throws Exception {
+        assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
+        indexRandom(true,
+            client().prepareIndex("index", "type", "1").setSource("ip", "192.168.1.7"),
+            client().prepareIndex("index", "type", "2").setSource("ip", "192.168.1.7"),
+            client().prepareIndex("index", "type", "3").setSource("ip", "2001:db8::2:1"));
+
+        Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
+            "doc['ip'].value", Collections.emptyMap());
+        SearchResponse response = client().prepareSearch("index").addAggregation(
+            AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(2, terms.getBuckets().size());
+
+        Terms.Bucket bucket1 = terms.getBuckets().get(0);
+        assertEquals(2, bucket1.getDocCount());
+        assertEquals("192.168.1.7", bucket1.getKey());
+        assertEquals("192.168.1.7", bucket1.getKeyAsString());
+
+        Terms.Bucket bucket2 = terms.getBuckets().get(1);
+        assertEquals(1, bucket2.getDocCount());
+        assertEquals("2001:db8::2:1", bucket2.getKey());
+        assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
+    }
+
+    public void testScriptValues() throws Exception {
+        assertAcked(prepareCreate("index").addMapping("type", "ip", "type=ip"));
+        indexRandom(true,
+            client().prepareIndex("index", "type", "1").setSource("ip", "192.168.1.7"),
+            client().prepareIndex("index", "type", "2").setSource("ip", "192.168.1.7"),
+            client().prepareIndex("index", "type", "3").setSource("ip", "2001:db8::2:1"));
+
+        Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
+            "doc['ip'].values", Collections.emptyMap());
+        SearchResponse response = client().prepareSearch("index").addAggregation(
+            AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(2, terms.getBuckets().size());
+
+        Terms.Bucket bucket1 = terms.getBuckets().get(0);
+        assertEquals(2, bucket1.getDocCount());
+        assertEquals("192.168.1.7", bucket1.getKey());
+        assertEquals("192.168.1.7", bucket1.getKeyAsString());
+
+        Terms.Bucket bucket2 = terms.getBuckets().get(1);
+        assertEquals(1, bucket2.getDocCount());
+        assertEquals("2001:db8::2:1", bucket2.getKey());
+        assertEquals("2001:db8::2:1", bucket2.getKeyAsString());
+    }
 }
 
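A note on the plumbing above: AggregationTestScriptsPlugin sits on top of the test framework's mock script engine, which never parses the script source as a language; the inline source string is used verbatim as a map key, and the registered Java function runs in its place. A self-contained sketch of that dispatch-by-source-string idea in plain Java (class and variable names are illustrative and independent of the ES test framework):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    public class MockScriptDispatchSketch {
        public static void main(String[] args) {
            // The "script source" is only a lookup key for a registered function.
            Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
            scripts.put("doc['ip'].value", vars -> vars.get("ip"));

            Map<String, Object> context = new HashMap<>();
            context.put("ip", "192.168.1.7");

            // "Executing" the script is a map lookup plus a function call.
            Object result = scripts.get("doc['ip'].value").apply(context);
            System.out.println(result); // prints 192.168.1.7
        }
    }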
@@ -766,6 +766,9 @@ public class SearchFieldsIT extends ESIntegTestCase {
                     .startObject("binary_field")
                         .field("type", "binary")
                     .endObject()
+                    .startObject("ip_field")
+                        .field("type", "ip")
+                    .endObject()
                 .endObject()
             .endObject()
         .endObject()

@@ -784,6 +787,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
             .field("double_field", 6.0d)
             .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
             .field("boolean_field", true)
+            .field("ip_field", "::1")
             .endObject()).execute().actionGet();
 
         client().admin().indices().prepareRefresh().execute().actionGet();

@@ -798,14 +802,16 @@ public class SearchFieldsIT extends ESIntegTestCase {
             .addDocValueField("float_field")
             .addDocValueField("double_field")
             .addDocValueField("date_field")
-            .addDocValueField("boolean_field");
+            .addDocValueField("boolean_field")
+            .addDocValueField("ip_field");
         SearchResponse searchResponse = builder.execute().actionGet();
 
         assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
         assertThat(searchResponse.getHits().hits().length, equalTo(1));
         Set<String> fields = new HashSet<>(searchResponse.getHits().getAt(0).fields().keySet());
         assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field",
-            "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field")));
+            "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field",
+            "ip_field")));
 
         assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
         assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));

@@ -817,6 +823,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
         assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) true));
         assertThat(searchResponse.getHits().getAt(0).fields().get("text_field").value(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).fields().get("keyword_field").value(), equalTo("foo"));
+        assertThat(searchResponse.getHits().getAt(0).fields().get("ip_field").value(), equalTo("::1"));
     }
 
     public void testScriptFields() throws Exception {
 
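For reference, the docvalue_fields option these assertions exercise is also reachable over REST; a sketch of the equivalent 5.x search request (the index name is illustrative):

    GET /index/_search
    {
      "docvalue_fields": [ "ip_field" ],
      "query": { "match_all": {} }
    }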
@@ -621,7 +621,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception {
         Settings nodeSettings = Settings.builder().put().build();
         logger.info("--> start two nodes");
-        internalCluster().startNodesAsync(2, nodeSettings).get();
+        internalCluster().startNodes(2, nodeSettings);
         // Register mock repositories
         client().admin().cluster().preparePutRepository("test-repo")
             .setType("mock").setSettings(Settings.builder()
 
@@ -299,7 +299,7 @@ Response:
 
 Field stats index constraints allow all field stats to be omitted for indices that don't match the constraint. An index
 constraint can exclude an index's field stats based on the `min_value` and `max_value` statistics. This option is only
-useful if the `level` option is set to `indices`.
+useful if the `level` option is set to `indices`. Fields that are not indexed (not searchable) are always omitted when an index constraint is defined.
 
 For example, index constraints can be useful for finding the min and max value of a particular property of your data in
 a time-based scenario. The following request only returns field stats for the `answer_count` property for indices
 
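The request the paragraph refers to sits beyond this hunk; as a sketch of what an index-constraint request looks like, assuming the 5.x field stats API (the constraint field name and date bounds are illustrative):

    POST /_field_stats?level=indices
    {
       "fields" : [ "answer_count" ],
       "index_constraints" : {
          "creation_date" : {
             "max_value" : { "gte" : "2014-09-01T00:00:00.000Z" },
             "min_value" : { "lt" : "2014-10-01T00:00:00.000Z" }
          }
       }
    }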
@@ -48,15 +48,15 @@ public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler<Dele
 
     @Override
     public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-        if (false == request.hasContent()) {
-            throw new ElasticsearchException("_delete_by_query requires a request body");
-        }
         return doPrepareRequest(request, client, false, false);
     }
 
     @Override
     protected DeleteByQueryRequest buildRequest(RestRequest request) throws IOException {
+        if (false == request.hasContent()) {
+            throw new ElasticsearchException("_delete_by_query requires a request body");
+        }
         /*
          * Passing the search request through DeleteByQueryRequest first allows
          * it to set its own defaults which differ from SearchRequest's
          * defaults. Then the parseInternalRequest can override them.
 
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.index.reindex;
 
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.client.node.NodeClient;

@@ -113,14 +112,18 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
 
     @Override
     public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-        if (false == request.hasContent()) {
-            throw new ElasticsearchException("_reindex requires a request body");
-        }
         return doPrepareRequest(request, client, true, true);
     }
 
     @Override
     protected ReindexRequest buildRequest(RestRequest request) throws IOException {
+        if (false == request.hasContent()) {
+            throw new IllegalArgumentException("_reindex requires a request body");
+        }
+        if (request.hasParam("pipeline")) {
+            throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. "
+                + "Specify it in the [dest] object instead.");
+        }
         ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest());
         try (XContentParser xcontent = XContentFactory.xContent(request.content()).createParser(request.content())) {
             PARSER.parse(xcontent, internal, new ReindexParseContext(searchRequestParsers, parseFieldMatcher));
 
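The new guard rejects pipeline as a URL query parameter; per the reindex API it belongs inside the body's dest object instead. A sketch of the accepted form (index and pipeline names are illustrative):

    POST /_reindex
    {
      "source": { "index": "source" },
      "dest": { "index": "dest", "pipeline": "my-pipeline" }
    }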
@@ -23,20 +23,25 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.reindex.RestReindexAction.ReindexParseContext;
 import org.elasticsearch.index.reindex.remote.RemoteInfo;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.rest.RestController;
 import org.elasticsearch.search.SearchRequestParsers;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import static java.util.Collections.singletonMap;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.mockito.Mockito.mock;
 
 public class RestReindexActionTests extends ESTestCase {
     public void testBuildRemoteInfoNoRemote() throws IOException {

@@ -127,6 +132,31 @@ public class RestReindexActionTests extends ESTestCase {
         }
     }
 
+    public void testPipelineQueryParameterIsError() throws IOException {
+        SearchRequestParsers parsers = new SearchRequestParsers(new IndicesQueriesRegistry(), null, null, null);
+        RestReindexAction action = new RestReindexAction(Settings.EMPTY, mock(RestController.class), parsers, null);
+
+        FakeRestRequest.Builder request = new FakeRestRequest.Builder();
+        try (XContentBuilder body = JsonXContent.contentBuilder().prettyPrint()) {
+            body.startObject(); {
+                body.startObject("source"); {
+                    body.field("index", "source");
+                }
+                body.endObject();
+                body.startObject("dest"); {
+                    body.field("index", "dest");
+                }
+                body.endObject();
+            }
+            body.endObject();
+            request.withContent(body.bytes());
+        }
+        request.withParams(singletonMap("pipeline", "doesn't matter"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build()));
+
+        assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage());
+    }
+
     private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException {
         Map<String, Object> remote = new HashMap<>();
         remote.put("host", hostInRest);
 
@@ -64,8 +64,6 @@ import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
-
 @ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0)
 @SuppressForbidden(reason = "use http server")
 // TODO this should be an IT but currently all ITs in this project run against a real cluster

@@ -269,7 +267,7 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase {
         // only wait for the cluster to form
         ensureClusterSizeConsistency();
         // add one more node and wait for it to join
-        internalCluster().startDataOnlyNodeAsync().get();
+        internalCluster().startDataOnlyNode();
         ensureClusterSizeConsistency();
     }
 }
 
@@ -243,7 +243,7 @@ public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase {
         // only wait for the cluster to form
         assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
         // add one more node and wait for it to join
-        internalCluster().startDataOnlyNodeAsync().get();
+        internalCluster().startDataOnlyNode();
         assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
     }
 }
 
@@ -40,7 +40,6 @@ import org.junit.BeforeClass;
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;

@@ -200,7 +199,7 @@ public class GceDiscoverTests extends ESIntegTestCase {
         // only wait for the cluster to form
         assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
         // add one more node and wait for it to join
-        internalCluster().startDataOnlyNodeAsync().get();
+        internalCluster().startDataOnlyNode();
         assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
     }
 }
 
@@ -19,6 +19,12 @@
 
 package org.elasticsearch;
 
+import org.apache.lucene.analysis.en.PorterStemFilterFactory;
+import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
+import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.index.analysis.ASCIIFoldingTokenFilterFactory;
 import org.elasticsearch.index.analysis.ApostropheFilterFactory;

@@ -86,13 +92,19 @@ import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
 import org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory;
 import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
 import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
+import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
+import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 /**
  * Alerts us if new analyzers are added to lucene, so we don't miss them.

@@ -102,6 +114,19 @@ import java.util.TreeSet;
  */
 public class AnalysisFactoryTestCase extends ESTestCase {
 
+    private static final Pattern UNDERSCORE_THEN_ANYTHING = Pattern.compile("_(.)");
+
+    private static String toCamelCase(String s) {
+        Matcher m = UNDERSCORE_THEN_ANYTHING.matcher(s);
+        StringBuffer sb = new StringBuffer();
+        while (m.find()) {
+            m.appendReplacement(sb, m.group(1).toUpperCase());
+        }
+        m.appendTail(sb);
+        sb.setCharAt(0, Character.toUpperCase(sb.charAt(0)));
+        return sb.toString();
+    }
+
     static final Map<String,Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
         .put("classic", ClassicTokenizerFactory.class)
 
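The toCamelCase helper added above converts the underscore-separated names reported by the pre-built ES factories (for example uax_url_email) into the CamelCase form that the PREBUILT_* lookups below feed to Lucene's factory lookup methods. A standalone sketch of the same regex logic (the sample inputs are illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ToCamelCaseSketch {
        private static final Pattern UNDERSCORE_THEN_ANYTHING = Pattern.compile("_(.)");

        static String toCamelCase(String s) {
            Matcher m = UNDERSCORE_THEN_ANYTHING.matcher(s);
            StringBuffer sb = new StringBuffer();
            while (m.find()) {
                // Replace each "_x" with upper-case "X".
                m.appendReplacement(sb, m.group(1).toUpperCase());
            }
            m.appendTail(sb);
            // Upper-case the leading character as well.
            sb.setCharAt(0, Character.toUpperCase(sb.charAt(0)));
            return sb.toString();
        }

        public static void main(String[] args) {
            System.out.println(toCamelCase("uax_url_email"));  // UaxUrlEmail
            System.out.println(toCamelCase("path_hierarchy")); // PathHierarchy
        }
    }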
@@ -121,6 +146,26 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("wikipedia", Void.class)
         .immutableMap();
 
+    static final Map<PreBuiltTokenizers, Class<?>> PREBUILT_TOKENIZERS;
+    static {
+        PREBUILT_TOKENIZERS = new HashMap<>();
+        for (PreBuiltTokenizers tokenizer : PreBuiltTokenizers.values()) {
+            Class<?> luceneFactoryClazz;
+            switch (tokenizer) {
+            case UAX_URL_EMAIL:
+                luceneFactoryClazz = org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class;
+                break;
+            case PATH_HIERARCHY:
+                luceneFactoryClazz = Void.class;
+                break;
+            default:
+                luceneFactoryClazz = org.apache.lucene.analysis.util.TokenizerFactory.lookupClass(
+                    toCamelCase(tokenizer.getTokenizerFactory(Version.CURRENT).name()));
+            }
+            PREBUILT_TOKENIZERS.put(tokenizer, luceneFactoryClazz);
+        }
+    }
+
     static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
         .put("apostrophe", ApostropheFilterFactory.class)

@@ -233,6 +278,41 @@ public class AnalysisFactoryTestCase extends ESTestCase {
 
         .immutableMap();
 
+    static final Map<PreBuiltTokenFilters, Class<?>> PREBUILT_TOKENFILTERS;
+    static {
+        PREBUILT_TOKENFILTERS = new HashMap<>();
+        for (PreBuiltTokenFilters tokenizer : PreBuiltTokenFilters.values()) {
+            Class<?> luceneFactoryClazz;
+            switch (tokenizer) {
+            case REVERSE:
+                luceneFactoryClazz = ReverseStringFilterFactory.class;
+                break;
+            case UNIQUE:
+                luceneFactoryClazz = Void.class;
+                break;
+            case SNOWBALL:
+            case DUTCH_STEM:
+            case FRENCH_STEM:
+            case RUSSIAN_STEM:
+                luceneFactoryClazz = SnowballPorterFilterFactory.class;
+                break;
+            case STEMMER:
+                luceneFactoryClazz = PorterStemFilterFactory.class;
+                break;
+            case DELIMITED_PAYLOAD_FILTER:
+                luceneFactoryClazz = org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class;
+                break;
+            case LIMIT:
+                luceneFactoryClazz = org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory.class;
+                break;
+            default:
+                luceneFactoryClazz = org.apache.lucene.analysis.util.TokenFilterFactory.lookupClass(
+                    toCamelCase(tokenizer.getTokenFilterFactory(Version.CURRENT).name()));
+            }
+            PREBUILT_TOKENFILTERS.put(tokenizer, luceneFactoryClazz);
+        }
+    }
+
     static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
         .put("htmlstrip", HtmlStripCharFilterFactory.class)

@@ -244,6 +324,20 @@ public class AnalysisFactoryTestCase extends ESTestCase {
         .put("persian", Void.class)
         .immutableMap();
 
+    static final Map<PreBuiltCharFilters, Class<?>> PREBUILT_CHARFILTERS;
+    static {
+        PREBUILT_CHARFILTERS = new HashMap<>();
+        for (PreBuiltCharFilters tokenizer : PreBuiltCharFilters.values()) {
+            Class<?> luceneFactoryClazz;
+            switch (tokenizer) {
+            default:
+                luceneFactoryClazz = org.apache.lucene.analysis.util.CharFilterFactory.lookupClass(
+                    toCamelCase(tokenizer.getCharFilterFactory(Version.CURRENT).name()));
+            }
+            PREBUILT_CHARFILTERS.put(tokenizer, luceneFactoryClazz);
+        }
+    }
+
     protected Map<String, Class<?>> getTokenizers() {
         return KNOWN_TOKENIZERS;
     }

@@ -325,4 +419,62 @@ public class AnalysisFactoryTestCase extends ESTestCase {
             classesThatShouldNotHaveMultiTermSupport.isEmpty());
     }
 
|
public void testPreBuiltMultiTermAware() {
|
||||||
|
Collection<Object> expected = new HashSet<>();
|
||||||
|
Collection<Object> actual = new HashSet<>();
|
||||||
|
|
||||||
|
for (Map.Entry<PreBuiltTokenizers, Class<?>> entry : PREBUILT_TOKENIZERS.entrySet()) {
|
||||||
|
PreBuiltTokenizers tokenizer = entry.getKey();
|
||||||
|
Class<?> luceneFactory = entry.getValue();
|
||||||
|
if (luceneFactory == Void.class) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
assertTrue(TokenizerFactory.class.isAssignableFrom(luceneFactory));
|
||||||
|
if (tokenizer.getTokenizerFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
|
||||||
|
actual.add(tokenizer);
|
||||||
|
}
|
||||||
|
if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
|
||||||
|
expected.add(tokenizer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (Map.Entry<PreBuiltTokenFilters, Class<?>> entry : PREBUILT_TOKENFILTERS.entrySet()) {
|
||||||
|
PreBuiltTokenFilters tokenFilter = entry.getKey();
|
||||||
|
Class<?> luceneFactory = entry.getValue();
|
||||||
|
if (luceneFactory == Void.class) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
assertTrue(TokenFilterFactory.class.isAssignableFrom(luceneFactory));
|
||||||
|
if (tokenFilter.getTokenFilterFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
|
||||||
|
actual.add(tokenFilter);
|
||||||
|
}
|
||||||
|
if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
|
||||||
|
expected.add(tokenFilter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (Map.Entry<PreBuiltCharFilters, Class<?>> entry : PREBUILT_CHARFILTERS.entrySet()) {
|
||||||
|
PreBuiltCharFilters charFilter = entry.getKey();
|
||||||
|
Class<?> luceneFactory = entry.getValue();
|
||||||
|
if (luceneFactory == Void.class) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
assertTrue(CharFilterFactory.class.isAssignableFrom(luceneFactory));
|
||||||
|
if (charFilter.getCharFilterFactory(Version.CURRENT) instanceof MultiTermAwareComponent) {
|
||||||
|
actual.add(charFilter);
|
||||||
|
}
|
||||||
|
if (org.apache.lucene.analysis.util.MultiTermAwareComponent.class.isAssignableFrom(luceneFactory)) {
|
||||||
|
expected.add(charFilter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Set<Object> classesMissingMultiTermSupport = new HashSet<>(expected);
|
||||||
|
classesMissingMultiTermSupport.removeAll(actual);
|
||||||
|
assertTrue("Pre-built components are missing multi-term support: " + classesMissingMultiTermSupport,
|
||||||
|
classesMissingMultiTermSupport.isEmpty());
|
||||||
|
|
||||||
|
Set<Object> classesThatShouldNotHaveMultiTermSupport = new HashSet<>(actual);
|
||||||
|
classesThatShouldNotHaveMultiTermSupport.removeAll(expected);
|
||||||
|
assertTrue("Pre-built components should not have multi-term support: " + classesThatShouldNotHaveMultiTermSupport,
|
||||||
|
classesThatShouldNotHaveMultiTermSupport.isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
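The new testPreBuiltMultiTermAware compares, per pre-built component, what the Lucene factory class declares (expected) against what the Elasticsearch factory implements (actual), and asserts that both set differences are empty. The same assertion pattern in a self-contained form (the string names here are placeholders, not real component lists):

import java.util.HashSet;
import java.util.Set;

public class SetDifferenceCheck {
    public static void main(String[] args) {
        Set<String> expected = new HashSet<>(); // multi-term aware according to Lucene
        expected.add("lowercase");
        expected.add("asciifolding");
        Set<String> actual = new HashSet<>();   // multi-term aware according to ES
        actual.add("lowercase");

        Set<String> missing = new HashSet<>(expected);
        missing.removeAll(actual);   // declared by Lucene, not surfaced by ES
        Set<String> extra = new HashSet<>(actual);
        extra.removeAll(expected);   // surfaced by ES, not declared by Lucene

        System.out.println("missing multi-term support: " + missing);
        System.out.println("should not have multi-term support: " + extra);
    }
}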

@@ -130,7 +130,6 @@ import java.util.stream.Stream;

 import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY;
 import static org.apache.lucene.util.LuceneTestCase.rarely;
-import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING;
 import static org.elasticsearch.test.ESTestCase.assertBusy;
 import static org.elasticsearch.test.ESTestCase.randomFrom;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@@ -696,10 +695,6 @@ public final class InternalTestCluster extends TestCluster {
         ensureOpen(); // currently unused
         Builder builder = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false);
-        if (size() == 0) {
-            // if we are the first node - don't wait for a state
-            builder.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
-        }
         return startNode(builder);
     }

@@ -791,6 +786,10 @@ public final class InternalTestCluster extends TestCluster {
         return nodeAndClientId;
     }

+    public String getName() {
+        return name;
+    }
+
     public boolean isMasterEligible() {
         return Node.NODE_MASTER_SETTING.get(node.settings());
     }

@@ -887,9 +886,6 @@ public final class InternalTestCluster extends TestCluster {
             assert ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(newSettings.build()) == false : "min master nodes is auto managed";
             newSettings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes).build();
         }

-        // validation is (optionally) done in fullRestart/rollingRestart
-        newSettings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s");
         if (clearDataIfNeeded) {
             clearDataIfNeeded(callback);
         }

@@ -1018,10 +1014,6 @@ public final class InternalTestCluster extends TestCluster {
             final Settings.Builder settings = Settings.builder();
             settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
             settings.put(Node.NODE_DATA_SETTING.getKey(), false);
-            if (autoManageMinMasterNodes) {
-                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
-            }
-
             NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
             toStartAndPublish.add(nodeAndClient);
         }

@@ -1032,9 +1024,6 @@ public final class InternalTestCluster extends TestCluster {
                 settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build();
                 settings.put(Node.NODE_DATA_SETTING.getKey(), true).build();
             }
-            if (autoManageMinMasterNodes) {
-                settings.put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s"); // we wait at the end
-            }
             NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true, defaultMinMasterNodes);
             toStartAndPublish.add(nodeAndClient);
         }

@@ -1347,10 +1336,18 @@ public final class InternalTestCluster extends TestCluster {
             // special case for 1 node master - we can't update the min master nodes before we add more nodes.
             updateMinMasterNodes(currentMasters + newMasters);
         }
-        for (NodeAndClient nodeAndClient : nodeAndClients) {
-            nodeAndClient.startNode();
-            publishNode(nodeAndClient);
+        List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList());
+        try {
+            for (Future<?> future : futures) {
+                future.get();
+            }
+        } catch (InterruptedException e) {
+            throw new AssertionError("interrupted while starting nodes", e);
+        } catch (ExecutionException e) {
+            throw new RuntimeException("failed to start nodes", e);
         }
+        nodeAndClients.forEach(this::publishNode);

         if (autoManageMinMasterNodes && currentMasters == 1 && newMasters > 0) {
             // update once masters have joined
             validateClusterFormed();
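The hunk above switches node startup from a sequential loop to submit-all-then-join: every NodeAndClient::startNode is queued on the executor first, and only then are the futures awaited, so the nodes boot concurrently. A self-contained sketch of that pattern (the Runnable tasks stand in for node starts):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitThenJoin {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newCachedThreadPool();
        List<Runnable> startTasks = Arrays.asList(
                () -> System.out.println("node-0 started"),
                () -> System.out.println("node-1 started"));

        // Submit everything before waiting on anything.
        List<Future<?>> futures = new ArrayList<>();
        for (Runnable task : startTasks) {
            futures.add(executor.submit(task));
        }
        try {
            for (Future<?> future : futures) {
                future.get(); // propagates any startup failure
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError("interrupted while starting nodes", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("failed to start nodes", e);
        } finally {
            executor.shutdown();
        }
    }
}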

@@ -1535,13 +1532,7 @@ public final class InternalTestCluster extends TestCluster {
             nodeAndClient.recreateNodeOnRestart(callback, false, autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1);
         }

-        for (NodeAndClient nodeAndClient : startUpOrder) {
-            logger.info("starting node [{}] ", nodeAndClient.name);
-            nodeAndClient.startNode();
-            if (activeDisruptionScheme != null) {
-                activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
-            }
-        }
+        startAndPublishNodesAndClients(startUpOrder);

         if (callback.validateClusterForming()) {
             validateClusterFormed();

@@ -1635,6 +1626,61 @@ public final class InternalTestCluster extends TestCluster {
         return buildNode.name;
     }

+    /**
+     * Starts multiple nodes with default settings and returns their names
+     */
+    public synchronized List<String> startNodes(int numOfNodes) {
+        return startNodes(numOfNodes, Settings.EMPTY);
+    }
+
+    /**
+     * Starts multiple nodes with the given settings and returns their names
+     */
+    public synchronized List<String> startNodes(int numOfNodes, Settings settings) {
+        return startNodes(Collections.nCopies(numOfNodes, settings).stream().toArray(Settings[]::new));
+    }
+
+    /**
+     * Starts multiple nodes with the given settings and returns their names
+     */
+    public synchronized List<String> startNodes(Settings... settings) {
+        final int defaultMinMasterNodes;
+        if (autoManageMinMasterNodes) {
+            int mastersDelta = (int) Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count();
+            defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + mastersDelta);
+        } else {
+            defaultMinMasterNodes = -1;
+        }
+        List<NodeAndClient> nodes = new ArrayList<>();
+        for (Settings nodeSettings: settings) {
+            nodes.add(buildNode(nodeSettings, defaultMinMasterNodes));
+        }
+        startAndPublishNodesAndClients(nodes);
+        if (autoManageMinMasterNodes) {
+            validateClusterFormed();
+        }
+
+        return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
+    }
+
+    public synchronized List<String> startMasterOnlyNodes(int numNodes) {
+        return startMasterOnlyNodes(numNodes, Settings.EMPTY);
+    }
+
+    public synchronized List<String> startMasterOnlyNodes(int numNodes, Settings settings) {
+        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
+        return startNodes(numNodes, settings1);
+    }
+
+    public synchronized List<String> startDataOnlyNodes(int numNodes) {
+        return startDataOnlyNodes(numNodes, Settings.EMPTY);
+    }
+
+    public synchronized List<String> startDataOnlyNodes(int numNodes, Settings settings) {
+        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
+        return startNodes(numNodes, settings1);
+    }
+
     /**
      * updates the min master nodes setting in the current running cluster.
      *
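The methods added above give tests a blocking, list-returning way to bring up several nodes at once. A hypothetical call site (the `cluster` variable stands for a running InternalTestCluster and is an assumption of this sketch, as is the surrounding test):

// inside a test, assuming `InternalTestCluster cluster` is available:
List<String> nodeNames = cluster.startNodes(3);             // three default nodes
List<String> masterNames = cluster.startMasterOnlyNodes(2); // node.master=true, node.data=false
List<String> dataNames = cluster.startDataOnlyNodes(2);     // node.master=false, node.data=true
// Each call blocks until the nodes are started and published; when
// min-master-nodes is auto-managed it also waits for the cluster to form.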

@@ -1667,31 +1713,8 @@ public final class InternalTestCluster extends TestCluster {
         return (int)nodes.values().stream().filter(n -> Node.NODE_MASTER_SETTING.get(n.node().settings())).count();
     }

-    public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes) {
-        return startMasterOnlyNodesAsync(numNodes, Settings.EMPTY);
-    }
-
-    public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes, Settings settings) {
-        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
-        return startNodesAsync(numNodes, settings1);
-    }
-
-    public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes) {
-        return startDataOnlyNodesAsync(numNodes, Settings.EMPTY);
-    }
-
-    public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes, Settings settings) {
-        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
-        return startNodesAsync(numNodes, settings1);
-    }
-
-    public synchronized Async<String> startMasterOnlyNodeAsync() {
-        return startMasterOnlyNodeAsync(Settings.EMPTY);
-    }
-
-    public synchronized Async<String> startMasterOnlyNodeAsync(Settings settings) {
-        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build();
-        return startNodeAsync(settings1);
+    public synchronized String startMasterOnlyNode() {
+        return startMasterOnlyNode(Settings.EMPTY);
     }

     public synchronized String startMasterOnlyNode(Settings settings) {

@@ -1699,109 +1722,14 @@ public final class InternalTestCluster extends TestCluster {
         return startNode(settings1);
     }

-    public synchronized Async<String> startDataOnlyNodeAsync() {
-        return startDataOnlyNodeAsync(Settings.EMPTY);
+    public synchronized String startDataOnlyNode() {
+        return startDataOnlyNode(Settings.EMPTY);
     }

-    public synchronized Async<String> startDataOnlyNodeAsync(Settings settings) {
-        Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
-        return startNodeAsync(settings1);
-    }
-
     public synchronized String startDataOnlyNode(Settings settings) {
         Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build();
         return startNode(settings1);
     }

-    /**
-     * Starts a node in an async manner with the given settings and returns future with its name.
-     */
-    public synchronized Async<String> startNodeAsync() {
-        return startNodeAsync(Settings.EMPTY);
-    }
-
-    /**
-     * Starts a node in an async manner with the given settings and returns future with its name.
-     */
-    public synchronized Async<String> startNodeAsync(final Settings settings) {
-        final int defaultMinMasterNodes;
-        if (autoManageMinMasterNodes) {
-            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? 1 : 0;
-            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
-        } else {
-            defaultMinMasterNodes = -1;
-        }
-        return startNodeAsync(settings, defaultMinMasterNodes);
-    }
-
-    private synchronized Async<String> startNodeAsync(final Settings settings, int defaultMinMasterNodes) {
-        final NodeAndClient buildNode = buildNode(settings, defaultMinMasterNodes);
-        final Future<String> submit = executor.submit(() -> {
-            buildNode.startNode();
-            publishNode(buildNode);
-            return buildNode.name;
-        });
-        return () -> submit.get();
-    }
-
-    /**
-     * Starts multiple nodes in an async manner and returns future with its name.
-     */
-    public synchronized Async<List<String>> startNodesAsync(final int numNodes) {
-        return startNodesAsync(numNodes, Settings.EMPTY);
-    }
-
-    /**
-     * Starts multiple nodes in an async manner with the given settings and returns future with its name.
-     */
-    public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings) {
-        final int defaultMinMasterNodes;
-        if (autoManageMinMasterNodes) {
-            int mastersDelta = Node.NODE_MASTER_SETTING.get(settings) ? numNodes : 0;
-            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
-        } else {
-            defaultMinMasterNodes = -1;
-        }
-        final List<Async<String>> asyncs = new ArrayList<>();
-        for (int i = 0; i < numNodes; i++) {
-            asyncs.add(startNodeAsync(settings, defaultMinMasterNodes));
-        }
-
-        return () -> {
-            List<String> ids = new ArrayList<>();
-            for (Async<String> async : asyncs) {
-                ids.add(async.get());
-            }
-            return ids;
-        };
-    }
-
-    /**
-     * Starts multiple nodes (based on the number of settings provided) in an async manner, with explicit settings for each node.
-     * The order of the node names returned matches the order of the settings provided.
-     */
-    public synchronized Async<List<String>> startNodesAsync(final Settings... settings) {
-        final int defaultMinMasterNodes;
-        if (autoManageMinMasterNodes) {
-            int mastersDelta = (int) Stream.of(settings).filter(Node.NODE_MASTER_SETTING::get).count();
-            defaultMinMasterNodes = updateMinMasterNodes(getMasterNodesCount() + mastersDelta);
-        } else {
-            defaultMinMasterNodes = -1;
-        }
-        List<Async<String>> asyncs = new ArrayList<>();
-        for (Settings setting : settings) {
-            asyncs.add(startNodeAsync(setting, defaultMinMasterNodes));
-        }
-        return () -> {
-            List<String> ids = new ArrayList<>();
-            for (Async<String> async : asyncs) {
-                ids.add(async.get());
-            }
-            return ids;
-        };
-    }
-
     private synchronized void publishNode(NodeAndClient nodeAndClient) {
         assert !nodeAndClient.node().isClosed();
         nodes.put(nodeAndClient.name, nodeAndClient);

@@ -1828,7 +1756,8 @@ public final class InternalTestCluster extends TestCluster {


     public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
-        clearDisruptionScheme();
+        assert activeDisruptionScheme == null :
+            "there is already an active disruption [" + activeDisruptionScheme + "]. call clearDisruptionScheme first";
         scheme.applyToCluster(this);
         activeDisruptionScheme = scheme;
     }

@@ -2121,14 +2050,4 @@ public final class InternalTestCluster extends TestCluster {
             }
         }
     }

-    /**
-     * Simple interface that allows to wait for an async operation to finish
-     *
-     * @param <T> the result of the async execution
-     */
-    public interface Async<T> {
-        T get() throws ExecutionException, InterruptedException;
-    }
-
 }
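With the Async variants and the Async<T> callback interface removed, call sites move from a two-step submit/get to a single blocking call. A sketch of the mechanical migration (shown as comments since the "before" form no longer compiles against the new code):

// before: start asynchronously, then wait explicitly
// Async<List<String>> async = internalCluster().startNodesAsync(3);
// List<String> names = async.get();

// after: the call itself blocks until the nodes are up
// List<String> names = internalCluster().startNodes(3);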