Merge branch 'master' into feature/rank-eval

commit 8f88917c4e
@@ -120,7 +120,8 @@ Please follow these formatting guidelines:
 * The rest is left to Java coding standards
 * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks, that is fine to do.
 * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
-* Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+* Eclipse: `Preferences->Java->Code Style->Organize Imports`. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+* IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
 * Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
 
 To create a distribution from the source, simply run:
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.Objects;
 
 public abstract class FieldStats<T> implements Writeable, ToXContent {
     private final byte type;

@@ -46,13 +47,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     protected T minValue;
     protected T maxValue;
 
-    FieldStats(byte type, long maxDoc, boolean isSearchable, boolean isAggregatable) {
-        this(type, maxDoc, 0, 0, 0, isSearchable, isAggregatable, null, null);
-    }
-
     FieldStats(byte type,
                long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
+        Objects.requireNonNull(minValue, "minValue must not be null");
+        Objects.requireNonNull(maxValue, "maxValue must not be null");
         this.type = type;
         this.maxDoc = maxDoc;
         this.docCount = docCount;

@@ -220,14 +219,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     }
 
     private void updateMinMax(T min, T max) {
-        if (minValue == null) {
-            minValue = min;
-        } else if (min != null && compare(minValue, min) > 0) {
+        if (compare(minValue, min) > 0) {
             minValue = min;
         }
-        if (maxValue == null) {
-            maxValue = max;
-        } else if (max != null && compare(maxValue, max) < 0) {
+        if (compare(maxValue, max) < 0) {
             maxValue = max;
         }
     }

@@ -266,11 +261,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         out.writeLong(sumTotalTermFreq);
         out.writeBoolean(isSearchable);
         out.writeBoolean(isAggregatable);
-        boolean hasMinMax = minValue != null;
-        out.writeBoolean(hasMinMax);
-        if (hasMinMax) {
-            writeMinMax(out);
-        }
+        writeMinMax(out);
     }
 
     protected abstract void writeMinMax(StreamOutput out) throws IOException;
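The last hunk changes the wire format: min/max used to be optional behind a `hasMinMax` boolean, and are now required and written unconditionally. A minimal standalone sketch of the two layouts, using plain `java.io` streams as stand-ins for Elasticsearch's `StreamOutput` (class and method names here are illustrative, not the ES API):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class WireFormatSketch {
    // Old layout: a boolean flag announces whether min/max follow.
    static void writeOptional(DataOutputStream out, Long min, Long max) throws IOException {
        boolean hasMinMax = min != null && max != null;
        out.writeBoolean(hasMinMax);
        if (hasMinMax) {
            out.writeLong(min);
            out.writeLong(max);
        }
    }

    // New layout: min/max are required, so they are written unconditionally
    // and the flag byte disappears from the stream.
    static void writeRequired(DataOutputStream out, long min, long max) throws IOException {
        out.writeLong(min);
        out.writeLong(max);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeRequired(new DataOutputStream(bytes), 1L, 42L);
        System.out.println("required layout uses " + bytes.size() + " bytes"); // 16
    }
}
```

The flag-based layout costs one byte but tolerates absent stats; dropping it assumes every response now carries real min/max values, which is what the `Objects.requireNonNull` checks in the constructor enforce.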
@@ -280,9 +271,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
      * otherwise <code>false</code> is returned
      */
     public boolean match(IndexConstraint constraint) {
-        if (minValue == null) {
-            return false;
-        }
         int cmp;
         T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
         if (constraint.getProperty() == IndexConstraint.Property.MIN) {

@@ -307,6 +295,31 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         }
     }
 
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        FieldStats<?> that = (FieldStats<?>) o;
+
+        if (type != that.type) return false;
+        if (maxDoc != that.maxDoc) return false;
+        if (docCount != that.docCount) return false;
+        if (sumDocFreq != that.sumDocFreq) return false;
+        if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
+        if (isSearchable != that.isSearchable) return false;
+        if (isAggregatable != that.isAggregatable) return false;
+        if (!minValue.equals(that.minValue)) return false;
+        return maxValue.equals(that.maxValue);
+
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
+            minValue, maxValue);
+    }
+
     public static class Long extends FieldStats<java.lang.Long> {
         public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                     boolean isSearchable, boolean isAggregatable,

@@ -315,17 +328,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
                 isSearchable, isAggregatable, minValue, maxValue);
         }
 
-        public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
-                    boolean isSearchable, boolean isAggregatable) {
-            super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                isSearchable, isAggregatable, null, null);
-        }
-
-        public Long(long maxDoc,
-                    boolean isSearchable, boolean isAggregatable) {
-            super((byte) 0, maxDoc, isSearchable, isAggregatable);
-        }
-
         @Override
         public int compare(java.lang.Long o1, java.lang.Long o2) {
             return o1.compareTo(o2);

@@ -344,12 +346,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public String getMinValueAsString() {
-            return minValue != null ? java.lang.Long.toString(minValue) : null;
+            return java.lang.Long.toString(minValue);
         }
 
         @Override
         public String getMaxValueAsString() {
-            return maxValue != null ? java.lang.Long.toString(maxValue) : null;
+            return java.lang.Long.toString(maxValue);
         }
     }
 

@@ -361,15 +363,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
             minValue, maxValue);
         }
 
-        public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
-                    boolean isSearchable, boolean isAggregatable) {
-            super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, null, null);
-        }
-
-        public Double(long maxDoc, boolean isSearchable, boolean isAggregatable) {
-            super((byte) 1, maxDoc, isSearchable, isAggregatable);
-        }
-
         @Override
         public int compare(java.lang.Double o1, java.lang.Double o2) {
             return o1.compareTo(o2);

@@ -391,12 +384,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public String getMinValueAsString() {
-            return minValue != null ? java.lang.Double.toString(minValue) : null;
+            return java.lang.Double.toString(minValue);
         }
 
         @Override
         public String getMaxValueAsString() {
-            return maxValue != null ? java.lang.Double.toString(maxValue) : null;
+            return java.lang.Double.toString(maxValue);
         }
     }
 

@@ -412,20 +405,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
             this.formatter = formatter;
         }
 
-        public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
-                    boolean isSearchable, boolean isAggregatable,
-                    FormatDateTimeFormatter formatter) {
-            super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
-                null, null);
-            this.formatter = formatter;
-        }
-
-        public Date(long maxDoc, boolean isSearchable, boolean isAggregatable,
-                    FormatDateTimeFormatter formatter) {
-            super((byte) 2, maxDoc, isSearchable, isAggregatable);
-            this.formatter = formatter;
-        }
-
         @Override
         public int compare(java.lang.Long o1, java.lang.Long o2) {
             return o1.compareTo(o2);

@@ -449,12 +428,29 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public String getMinValueAsString() {
-            return minValue != null ? formatter.printer().print(minValue) : null;
+            return formatter.printer().print(minValue);
         }
 
         @Override
         public String getMaxValueAsString() {
-            return maxValue != null ? formatter.printer().print(maxValue) : null;
+            return formatter.printer().print(maxValue);
         }
 
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            if (!super.equals(o)) return false;
+
+            Date that = (Date) o;
+            return Objects.equals(formatter.format(), that.formatter.format());
+        }
+
+        @Override
+        public int hashCode() {
+            int result = super.hashCode();
+            result = 31 * result + formatter.format().hashCode();
+            return result;
+        }
     }
 

@@ -467,10 +463,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
             minValue, maxValue);
         }
 
-        public Text(long maxDoc, boolean isSearchable, boolean isAggregatable) {
-            super((byte) 3, maxDoc, isSearchable, isAggregatable);
-        }
-
         @Override
         public int compare(BytesRef o1, BytesRef o2) {
             return o1.compareTo(o2);

@@ -492,12 +484,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public String getMinValueAsString() {
-            return minValue != null ? minValue.utf8ToString() : null;
+            return minValue.utf8ToString();
         }
 
         @Override
         public String getMaxValueAsString() {
-            return maxValue != null ? maxValue.utf8ToString() : null;
+            return maxValue.utf8ToString();
         }
 
         @Override

@@ -516,10 +508,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
             minValue, maxValue);
         }
 
-        public Ip(long maxDoc, boolean isSearchable, boolean isAggregatable) {
-            super((byte) 4, maxDoc, isSearchable, isAggregatable);
-        }
-
         @Override
         public int compare(InetAddress o1, InetAddress o2) {
             byte[] b1 = InetAddressPoint.encode(o1);

@@ -544,12 +532,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
 
         @Override
         public String getMinValueAsString() {
-            return minValue != null ? NetworkAddress.format(minValue) : null;
+            return NetworkAddress.format(minValue);
         }
 
         @Override
         public String getMaxValueAsString() {
-            return maxValue != null ? NetworkAddress.format(maxValue) : null;
+            return NetworkAddress.format(maxValue);
         }
     }
 

@@ -561,53 +549,35 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         long sumTotalTermFreq = in.readLong();
         boolean isSearchable = in.readBoolean();
         boolean isAggregatable = in.readBoolean();
-        boolean hasMinMax = in.readBoolean();
-
         switch (type) {
             case 0:
-                if (hasMinMax) {
-                    return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                        isSearchable, isAggregatable, in.readLong(), in.readLong());
-                }
                 return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable);
+                    isSearchable, isAggregatable, in.readLong(), in.readLong());
             case 1:
-                if (hasMinMax) {
-                    return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                        isSearchable, isAggregatable, in.readDouble(), in.readDouble());
-                }
                 return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable);
+                    isSearchable, isAggregatable, in.readDouble(), in.readDouble());
             case 2:
                 FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
-                if (hasMinMax) {
-                    return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                        isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
-                }
                 return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, formatter);
+                    isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
 
             case 3:
-                if (hasMinMax) {
-                    return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                        isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
-                }
                 return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
-                    isSearchable, isAggregatable, null, null);
+                    isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
             case 4:
-                InetAddress min = null;
-                InetAddress max = null;
-                if (hasMinMax) {
-                    int l1 = in.readByte();
-                    byte[] b1 = new byte[l1];
-                    int l2 = in.readByte();
-                    byte[] b2 = new byte[l2];
-                    min = InetAddressPoint.decode(b1);
-                    max = InetAddressPoint.decode(b2);
-                }
+                int l1 = in.readByte();
+                byte[] b1 = new byte[l1];
+                in.readBytes(b1, 0, l1);
+                int l2 = in.readByte();
+                byte[] b2 = new byte[l2];
+                in.readBytes(b2, 0, l2);
+                InetAddress min = InetAddressPoint.decode(b1);
+                InetAddress max = InetAddressPoint.decode(b2);
                 return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                     isSearchable, isAggregatable, min, max);
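The reader above dispatches on a type byte written ahead of each payload; with min/max now required, every branch reads its values unconditionally. A hedged, self-contained sketch of that type-byte dispatch pattern using plain `java.io` streams (the type codes and payloads mirror the hunk, but the stream types are stand-ins):

```java
import java.io.*;

class TypeDispatchSketch {
    // Each concrete kind of value is identified by a byte written ahead of
    // its payload; the reader switches on that byte, as FieldStats.readFrom does.
    static Object read(DataInput in) throws IOException {
        byte type = in.readByte();
        switch (type) {
            case 0: return in.readLong();    // "Long"-style payload
            case 1: return in.readDouble();  // "Double"-style payload
            default: throw new IllegalStateException("Unknown type [" + type + "]");
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeByte(0); // tag first ...
        out.writeLong(42L); // ... then payload
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in)); // 42
    }
}
```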
@@ -35,16 +35,18 @@ public class MainResponse extends ActionResponse implements ToXContent {
     private String nodeName;
     private Version version;
     private ClusterName clusterName;
+    private String clusterUuid;
     private Build build;
     private boolean available;
 
     MainResponse() {
     }
 
-    public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) {
+    public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, boolean available) {
         this.nodeName = nodeName;
         this.version = version;
         this.clusterName = clusterName;
+        this.clusterUuid = clusterUuid;
         this.build = build;
         this.available = available;
     }

@@ -61,6 +63,10 @@ public class MainResponse extends ActionResponse implements ToXContent {
         return clusterName;
     }
 
+    public String getClusterUuid() {
+        return clusterUuid;
+    }
+
     public Build getBuild() {
         return build;
     }

@@ -75,6 +81,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         out.writeString(nodeName);
         Version.writeVersion(version, out);
         clusterName.writeTo(out);
+        out.writeString(clusterUuid);
         Build.writeBuild(build, out);
         out.writeBoolean(available);
     }

@@ -85,6 +92,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         nodeName = in.readString();
         version = Version.readVersion(in);
         clusterName = new ClusterName(in);
+        clusterUuid = in.readString();
         build = Build.readBuild(in);
         available = in.readBoolean();
     }

@@ -94,6 +102,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         builder.startObject();
         builder.field("name", nodeName);
         builder.field("cluster_name", clusterName.value());
+        builder.field("cluster_uuid", clusterUuid);
         builder.startObject("version")
             .field("number", version.toString())
             .field("build_hash", build.shortHash())
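Note that the new `clusterUuid` is inserted at the same position in both `writeTo` and `readFrom`; length-unprefixed stream formats like this stay correct only while the two sides agree on field order. A small sketch of that symmetry under the same assumption, with plain `java.io` streams standing in for `StreamOutput`/`StreamInput`:

```java
import java.io.*;

class RoundTripSketch {
    static void write(DataOutput out, String clusterName, String clusterUuid, boolean available) throws IOException {
        out.writeUTF(clusterName);
        out.writeUTF(clusterUuid); // new field, written right after clusterName
        out.writeBoolean(available);
    }

    static String read(DataInput in) throws IOException {
        String clusterName = in.readUTF();
        String clusterUuid = in.readUTF(); // must be read at the matching position
        boolean available = in.readBoolean();
        return clusterName + "/" + clusterUuid + "/" + available;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), "my-cluster", "a1b2c3", true);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in)); // my-cluster/a1b2c3/true
    }
}
```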
@@ -52,7 +52,7 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, MainResponse> {
         assert Node.NODE_NAME_SETTING.exists(settings);
         final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
         listener.onResponse(
-            new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), Build.CURRENT,
-                available));
+            new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(),
+                clusterState.metaData().clusterUUID(), Build.CURRENT, available));
     }
 }
@@ -28,6 +28,7 @@ import org.apache.lucene.util.StringHelper;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cli.UserException;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.inject.CreationException;

@@ -233,7 +234,7 @@ final class Bootstrap {
             final boolean foreground,
             final Path pidFile,
             final boolean quiet,
-            final Map<String, String> esSettings) throws BootstrapException, NodeValidationException {
+            final Map<String, String> esSettings) throws BootstrapException, NodeValidationException, UserException {
         // Set the system property before anything has a chance to trigger its use
         initLoggerPrefix();
@@ -107,7 +107,7 @@ class Elasticsearch extends SettingCommand {
     }
 
     void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
-        throws NodeValidationException {
+        throws NodeValidationException, UserException {
         try {
             Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
         } catch (BootstrapException | RuntimeException e) {

@@ -123,7 +123,8 @@ class Elasticsearch extends SettingCommand {
      *
      * http://commons.apache.org/proper/commons-daemon/procrun.html
      *
-     * NOTE: If this method is renamed and/or moved, make sure to update service.bat!
+     * NOTE: If this method is renamed and/or moved, make sure to
+     * update elasticsearch-service.bat!
      */
     static void close(String[] args) throws IOException {
         Bootstrap.stop();
@@ -100,16 +100,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
      * Returns true if this index can be supported by the current version of elasticsearch
      */
     private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
-        if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
-            // The index was created with elasticsearch that was using Lucene 5.2.1
-            return true;
-        }
-        if (indexMetaData.getMinimumCompatibleVersion() != null &&
-            indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) {
-            //The index was upgraded we can work with it
-            return true;
-        }
-        return false;
+        return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1);
     }
 
     /**
@@ -30,6 +30,8 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
 import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
 import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
 import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
+import org.elasticsearch.cli.ExitCodes;
+import org.elasticsearch.cli.UserException;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;

@@ -50,7 +52,7 @@ import java.util.Set;
 
 public class LogConfigurator {
 
-    public static void configure(final Environment environment, final boolean resolveConfig) throws IOException {
+    public static void configure(final Environment environment, final boolean resolveConfig) throws IOException, UserException {
         final Settings settings = environment.settings();
 
         setLogConfigurationSystemProperty(environment, settings);

@@ -75,6 +77,13 @@ public class LogConfigurator {
                 return FileVisitResult.CONTINUE;
             }
         });
+
+        if (configurations.isEmpty()) {
+            throw new UserException(
+                ExitCodes.CONFIG,
+                "no log4j2.properties found; tried [" + environment.configFile() + "] and its subdirectories");
+        }
+
         context.start(new CompositeConfiguration(configurations));
     }
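The added block fails fast: if the walk of the config directory finds no logging configuration, startup aborts with an explicit error instead of silently running with defaults. A self-contained sketch of the same pattern under that assumption (hypothetical standalone code, not the ES implementation; `IllegalStateException` stands in for `UserException`):

```java
import java.io.IOException;
import java.nio.file.*;
import java.util.ArrayList;
import java.util.List;

class ConfigDiscoverySketch {
    // Collect every log4j2.properties below configDir; refuse to continue if none exist.
    static List<Path> findConfigs(Path configDir) throws IOException {
        List<Path> configurations = new ArrayList<>();
        Files.walkFileTree(configDir, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, java.nio.file.attribute.BasicFileAttributes attrs) {
                if (file.getFileName().toString().equals("log4j2.properties")) {
                    configurations.add(file);
                }
                return FileVisitResult.CONTINUE;
            }
        });
        if (configurations.isEmpty()) {
            throw new IllegalStateException(
                "no log4j2.properties found; tried [" + configDir + "] and its subdirectories");
        }
        return configurations;
    }
}
```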
@@ -42,11 +42,15 @@ import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.tasks.RawTaskStatus;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportInterceptor;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.local.LocalTransport;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 
 /**
  * A module to handle registering and binding all network related classes.

@@ -54,7 +58,6 @@ import java.util.List;
 public class NetworkModule extends AbstractModule {
 
     public static final String TRANSPORT_TYPE_KEY = "transport.type";
-    public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
     public static final String HTTP_TYPE_KEY = "http.type";
     public static final String LOCAL_TRANSPORT = "local";
     public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";

@@ -65,8 +68,6 @@ public class NetworkModule extends AbstractModule {
     public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
     public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
     public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
-    public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
-        Setting.simpleString(TRANSPORT_SERVICE_TYPE_KEY, Property.NodeScope);
     public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
 
     private final NetworkService networkService;

@@ -74,10 +75,10 @@ public class NetworkModule extends AbstractModule {
     private final boolean transportClient;
 
     private final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry();
-    private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
     private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
     private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
     private final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
+    private final List<TransportInterceptor> transportIntercetors = new ArrayList<>();
 
     /**
      * Creates a network module that custom networking classes can be plugged into.

@@ -89,7 +90,6 @@ public class NetworkModule extends AbstractModule {
         this.networkService = networkService;
         this.settings = settings;
         this.transportClient = transportClient;
-        registerTransportService("default", TransportService.class);
         registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
         namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
         namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));

@@ -100,11 +100,6 @@ public class NetworkModule extends AbstractModule {
         return transportClient;
     }
 
-    /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
-    public void registerTransportService(String name, Class<? extends TransportService> clazz) {
-        transportServiceTypes.registerExtension(name, clazz);
-    }
-
     /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
     public void registerTransport(String name, Class<? extends Transport> clazz) {
         transportTypes.registerExtension(name, clazz);

@@ -149,9 +144,9 @@ public class NetworkModule extends AbstractModule {
     @Override
     protected void configure() {
         bind(NetworkService.class).toInstance(networkService);
-        transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
+        bindTransportService();
         transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
-
+        bind(TransportInterceptor.class).toInstance(new CompositeTransportInterceptor(this.transportIntercetors));
         if (transportClient == false) {
             if (HTTP_ENABLED.get(settings)) {
                 bind(HttpServer.class).asEagerSingleton();

@@ -181,4 +176,39 @@ public class NetworkModule extends AbstractModule {
     public boolean canRegisterHttpExtensions() {
         return transportClient == false;
     }
 
+    /**
+     * Registers a new {@link TransportInterceptor}
+     */
+    public void addTransportInterceptor(TransportInterceptor interceptor) {
+        this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null"));
+    }
+
+    static final class CompositeTransportInterceptor implements TransportInterceptor {
+        final List<TransportInterceptor> transportInterceptors;
+
+        private CompositeTransportInterceptor(List<TransportInterceptor> transportInterceptors) {
+            this.transportInterceptors = new ArrayList<>(transportInterceptors);
+        }
+
+        @Override
+        public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, TransportRequestHandler<T> actualHandler) {
+            for (TransportInterceptor interceptor : this.transportInterceptors) {
+                actualHandler = interceptor.interceptHandler(action, actualHandler);
+            }
+            return actualHandler;
+        }
+
+        @Override
+        public AsyncSender interceptSender(AsyncSender sender) {
+            for (TransportInterceptor interceptor : this.transportInterceptors) {
+                sender = interceptor.interceptSender(sender);
+            }
+            return sender;
+        }
+    }
+
+    protected void bindTransportService() {
+        bind(TransportService.class).asEagerSingleton();
+    }
 }
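The `CompositeTransportInterceptor` added above folds a list of interceptors over a handler, so each interceptor may wrap the one produced before it and the last-registered interceptor ends up outermost. A minimal standalone sketch of that wrapping order (the `Handler` interface is a hypothetical stand-in, not the ES transport API):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.UnaryOperator;

class InterceptorChainSketch {
    interface Handler { void handle(String request); }

    // Thread the handler through every interceptor; later entries wrap earlier ones.
    static Handler intercept(List<UnaryOperator<Handler>> interceptors, Handler actualHandler) {
        for (UnaryOperator<Handler> interceptor : interceptors) {
            actualHandler = interceptor.apply(actualHandler);
        }
        return actualHandler;
    }

    public static void main(String[] args) {
        List<UnaryOperator<Handler>> interceptors = new ArrayList<>(Arrays.asList(
            inner -> request -> { System.out.println("first"); inner.handle(request); },
            inner -> request -> { System.out.println("second (outermost)"); inner.handle(request); }));
        intercept(interceptors, request -> System.out.println("actual: " + request)).handle("ping");
        // prints: second (outermost), first, actual: ping
    }
}
```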
@@ -54,8 +54,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.fd.FaultDetection;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
 import org.elasticsearch.env.Environment;

@@ -226,7 +226,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
     NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
     NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
     NetworkModule.HTTP_TYPE_SETTING,
-    NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
     NetworkModule.TRANSPORT_TYPE_SETTING,
     HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
     HttpTransportSettings.SETTING_CORS_ENABLED,
|
|||
|
||||
package org.elasticsearch.discovery;
|
||||
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.inject.multibindings.Multibinder;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
|
@ -27,8 +26,8 @@ import org.elasticsearch.common.settings.Setting.Property;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.ExtensionPoint;
|
||||
import org.elasticsearch.discovery.local.LocalDiscovery;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.ping.ZenPing;
|
||||
import org.elasticsearch.discovery.zen.ping.ZenPingService;
|
||||
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
|
||||
|
|
|
@ -17,11 +17,10 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.zen.elect;
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectContainer;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
|
@ -33,9 +32,11 @@ import org.elasticsearch.common.util.CollectionUtils;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -45,17 +46,64 @@ public class ElectMasterService extends AbstractComponent {
|
|||
public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING =
|
||||
Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope);
|
||||
|
||||
// This is the minimum version a master needs to be on, otherwise it gets ignored
|
||||
// This is based on the minimum compatible version of the current version this node is on
|
||||
private final Version minMasterVersion;
|
||||
private final NodeComparator nodeComparator = new NodeComparator();
|
||||
|
||||
private volatile int minimumMasterNodes;
|
||||
|
||||
/**
|
||||
* a class to encapsulate all the information about a candidate in a master election
|
||||
* that is needed to decided which of the candidates should win
|
||||
*/
|
||||
public static class MasterCandidate {
|
||||
|
||||
public static final long UNRECOVERED_CLUSTER_VERSION = -1;
|
||||
|
||||
final DiscoveryNode node;
|
||||
|
||||
final long clusterStateVersion;
|
||||
|
||||
public MasterCandidate(DiscoveryNode node, long clusterStateVersion) {
|
||||
Objects.requireNonNull(node);
|
||||
assert clusterStateVersion >= -1 : "got: " + clusterStateVersion;
|
||||
assert node.isMasterNode();
|
||||
this.node = node;
|
||||
this.clusterStateVersion = clusterStateVersion;
|
||||
}
|
||||
|
||||
public DiscoveryNode getNode() {
|
||||
return node;
|
||||
}
|
||||
|
||||
public long getClusterStateVersion() {
|
||||
return clusterStateVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Candidate{" +
|
||||
"node=" + node +
|
||||
", clusterStateVersion=" + clusterStateVersion +
|
||||
'}';
|
||||
}
|
||||
|
||||
/**
|
||||
* compares two candidates to indicate which the a better master.
|
||||
* A higher cluster state version is better
|
||||
*
|
||||
* @return -1 if c1 is a batter candidate, 1 if c2.
|
||||
*/
|
||||
public static int compare(MasterCandidate c1, MasterCandidate c2) {
|
||||
// we explicitly swap c1 and c2 here. the code expects "better" is lower in a sorted
|
||||
// list, so if c2 has a higher cluster state version, it needs to come first.
|
||||
int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
|
||||
if (ret == 0) {
|
||||
ret = compareNodes(c1.getNode(), c2.getNode());
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@Inject
|
||||
public ElectMasterService(Settings settings) {
|
||||
super(settings);
|
||||
this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion();
|
||||
this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
|
||||
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
|
||||
}
|
||||
|
@ -69,16 +117,41 @@ public class ElectMasterService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||
if (minimumMasterNodes < 1) {
|
||||
return true;
|
||||
}
|
||||
int count = 0;
|
||||
for (DiscoveryNode node : nodes) {
|
||||
if (node.isMasterNode()) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count >= minimumMasterNodes;
|
||||
return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
|
||||
}
|
||||
|
||||
public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
|
||||
if (candidates.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
if (minimumMasterNodes < 1) {
|
||||
return true;
|
||||
}
|
||||
assert candidates.stream().map(MasterCandidate::getNode).collect(Collectors.toSet()).size() == candidates.size() :
|
||||
"duplicates ahead: " + candidates;
|
||||
return candidates.size() >= minimumMasterNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
|
||||
* if no master has been elected.
|
||||
*/
|
||||
public MasterCandidate electMaster(Collection<MasterCandidate> candidates) {
|
||||
assert hasEnoughCandidates(candidates);
|
||||
List<MasterCandidate> sortedCandidates = new ArrayList<>(candidates);
|
||||
sortedCandidates.sort(MasterCandidate::compare);
|
||||
return sortedCandidates.get(0);
|
||||
}
|
||||
|
||||
/** selects the best active master to join, where multiple are discovered */
|
||||
public DiscoveryNode tieBreakActiveMasters(Collection<DiscoveryNode> activeMasters) {
|
||||
return activeMasters.stream().min(ElectMasterService::compareNodes).get();
|
||||
}
|
||||
|
||||
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||
|
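The election flow introduced above is: verify a quorum of master-eligible candidates, sort so the best candidate (highest cluster state version, then preferred node) comes first, and pick it. A condensed standalone sketch under those assumptions — `Candidate` stands in for `ElectMasterService.MasterCandidate`, and the tie-break is reduced to node id:

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class ElectionSketch {
    static final class Candidate {
        final String nodeId;
        final long clusterStateVersion;
        Candidate(String nodeId, long clusterStateVersion) {
            this.nodeId = nodeId;
            this.clusterStateVersion = clusterStateVersion;
        }
    }

    /** higher cluster state version first, then lowest node id */
    static int compare(Candidate c1, Candidate c2) {
        int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion); // note the swap
        return ret != 0 ? ret : c1.nodeId.compareTo(c2.nodeId);
    }

    /** returns the winning candidate, or null when the quorum is not met */
    static Candidate electMaster(Collection<Candidate> candidates, int minimumMasterNodes) {
        if (candidates.isEmpty() || candidates.size() < minimumMasterNodes) {
            return null;
        }
        List<Candidate> sorted = new ArrayList<>(candidates);
        sorted.sort(ElectionSketch::compare);
        return sorted.get(0);
    }
}
```

Preferring the highest cluster state version means a node that has seen the most recent committed state wins, which avoids electing a master with a stale view when versions differ.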
@@ -107,7 +180,7 @@ public class ElectMasterService extends AbstractComponent {
      */
     public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {
         ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);
-        CollectionUtil.introSort(sortedNodes, nodeComparator);
+        CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);
         return sortedNodes;
     }

@@ -130,25 +203,6 @@ public class ElectMasterService extends AbstractComponent {
         return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
     }
 
-    /**
-     * Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
-     * if no master has been elected.
-     */
-    public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
-        List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
-        if (sortedNodes == null || sortedNodes.isEmpty()) {
-            return null;
-        }
-        DiscoveryNode masterNode = sortedNodes.get(0);
-        // Sanity check: maybe we don't end up here, because serialization may have failed.
-        if (masterNode.getVersion().before(minMasterVersion)) {
-            logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]", masterNode, masterNode.getVersion(), minMasterVersion);
-            return null;
-        } else {
-            return masterNode;
-        }
-    }
-
     private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
         List<DiscoveryNode> possibleNodes = CollectionUtils.iterableAsArrayList(nodes);
         if (possibleNodes.isEmpty()) {

@@ -161,21 +215,18 @@ public class ElectMasterService extends AbstractComponent {
                 it.remove();
             }
         }
-        CollectionUtil.introSort(possibleNodes, nodeComparator);
+        CollectionUtil.introSort(possibleNodes, ElectMasterService::compareNodes);
         return possibleNodes;
     }
 
-    private static class NodeComparator implements Comparator<DiscoveryNode> {
-
-        @Override
-        public int compare(DiscoveryNode o1, DiscoveryNode o2) {
-            if (o1.isMasterNode() && !o2.isMasterNode()) {
-                return -1;
-            }
-            if (!o1.isMasterNode() && o2.isMasterNode()) {
-                return 1;
-            }
-            return o1.getId().compareTo(o2.getId());
-        }
-    }
+    /** master nodes go before other nodes, with a secondary sort by id **/
+    private static int compareNodes(DiscoveryNode o1, DiscoveryNode o2) {
+        if (o1.isMasterNode() && !o2.isMasterNode()) {
+            return -1;
+        }
+        if (!o1.isMasterNode() && o2.isMasterNode()) {
+            return 1;
+        }
+        return o1.getId().compareTo(o2.getId());
+    }
 }
@@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.membership.MembershipAction;
 
 import java.util.ArrayList;
@@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;

@@ -56,7 +55,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
 import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
 import org.elasticsearch.discovery.zen.membership.MembershipAction;

@@ -76,13 +74,10 @@ import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
 import java.util.stream.Collectors;

@@ -146,9 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
 
     private final JoinThreadControl joinThreadControl;
 
-    /** counts the time this node has joined the cluster or have elected it self as master */
-    private final AtomicLong clusterJoinsCounter = new AtomicLong();
-
     // must initialized in doStart(), when we have the allocationService set
     private volatile NodeJoinController nodeJoinController;
     private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;

@@ -304,8 +296,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
     }
 
     @Override
-    public boolean nodeHasJoinedClusterOnce() {
-        return clusterJoinsCounter.get() > 0;
+    public ClusterState clusterState() {
+        return clusterService.state();
     }
 
     /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */

@@ -406,8 +398,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
             joinThreadControl.markThreadAsDone(currentThread);
             // we only starts nodesFD if we are master (it may be that we received a cluster state while pinging)
             nodesFD.updateNodesAndPing(state); // start the nodes FD
-            long count = clusterJoinsCounter.incrementAndGet();
-            logger.trace("cluster joins counter set to [{}] (elected as master)", count);
         }
 
     @Override

@@ -764,9 +754,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
             // its a fresh update from the master as we transition from a start of not having a master to having one
             logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId());
-            long count = clusterJoinsCounter.incrementAndGet();
-            logger.trace("updated cluster join cluster to [{}]", count);
-
             return newClusterState;
         }

@@ -873,16 +860,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         } else if (nodeJoinController == null) {
             throw new IllegalStateException("discovery module is not yet started");
         } else {
-            // The minimum supported version for a node joining a master:
-            Version minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion();
-            // Sanity check: maybe we don't end up here, because serialization may have failed.
-            if (node.getVersion().before(minimumNodeJoinVersion)) {
-                callback.onFailure(
-                    new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
-                );
-                return;
-            }
-
             // try and connect to the node, if it fails, we can raise an exception back to the client...
             transportService.connectToNode(node);

@@ -901,14 +878,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
 
     private DiscoveryNode findMaster() {
         logger.trace("starting to ping");
-        ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout);
+        List<ZenPing.PingResponse> fullPingResponses = pingService.pingAndWait(pingTimeout).toList();
         if (fullPingResponses == null) {
             logger.trace("No full ping responses");
             return null;
         }
         if (logger.isTraceEnabled()) {
             StringBuilder sb = new StringBuilder();
-            if (fullPingResponses.length == 0) {
+            if (fullPingResponses.size() == 0) {
                 sb.append(" {none}");
             } else {
                 for (ZenPing.PingResponse pingResponse : fullPingResponses) {
@@ -918,69 +895,57 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
             logger.trace("full ping responses:{}", sb);
         }
 
+        final DiscoveryNode localNode = clusterService.localNode();
+
+        // add our selves
+        assert fullPingResponses.stream().map(ZenPing.PingResponse::node)
+            .filter(n -> n.equals(localNode)).findAny().isPresent() == false;
+
+        fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state()));
+
         // filter responses
         final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
 
-        final DiscoveryNode localNode = clusterService.localNode();
-        List<DiscoveryNode> pingMasters = new ArrayList<>();
+        List<DiscoveryNode> activeMasters = new ArrayList<>();
         for (ZenPing.PingResponse pingResponse : pingResponses) {
-            if (pingResponse.master() != null) {
-                // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without
-                // any check / verifications from other nodes in ZenDiscover#innerJoinCluster()
-                if (!localNode.equals(pingResponse.master())) {
-                    pingMasters.add(pingResponse.master());
-                }
+            // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without
+            // any check / verifications from other nodes in ZenDiscover#innerJoinCluster()
+            if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) {
+                activeMasters.add(pingResponse.master());
             }
         }
 
-        // nodes discovered during pinging
-        Set<DiscoveryNode> activeNodes = new HashSet<>();
-        // nodes discovered who has previously been part of the cluster and do not ping for the very first time
-        Set<DiscoveryNode> joinedOnceActiveNodes = new HashSet<>();
-        if (localNode.isMasterNode()) {
-            activeNodes.add(localNode);
-            long joinsCounter = clusterJoinsCounter.get();
-            if (joinsCounter > 0) {
-                logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
-                joinedOnceActiveNodes.add(localNode);
-            }
-        }
+        List<ElectMasterService.MasterCandidate> masterCandidates = new ArrayList<>();
         for (ZenPing.PingResponse pingResponse : pingResponses) {
-            activeNodes.add(pingResponse.node());
-            if (pingResponse.hasJoinedOnce()) {
-                joinedOnceActiveNodes.add(pingResponse.node());
+            if (pingResponse.node().isMasterNode()) {
+                masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion()));
             }
         }
 
-        if (pingMasters.isEmpty()) {
-            if (electMaster.hasEnoughMasterNodes(activeNodes)) {
-                // we give preference to nodes who have previously already joined the cluster. Those will
-                // have a cluster state in memory, including an up to date routing table (which is not persistent to disk
-                // by the gateway)
-                DiscoveryNode master = electMaster.electMaster(joinedOnceActiveNodes);
-                if (master != null) {
-                    return master;
-                }
-                return electMaster.electMaster(activeNodes);
+        if (activeMasters.isEmpty()) {
+            if (electMaster.hasEnoughCandidates(masterCandidates)) {
+                final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates);
+                logger.trace("candidate {} won election", winner);
+                return winner.getNode();
             } else {
                 // if we don't have enough master nodes, we bail, because there are not enough master to elect from
-                logger.trace("not enough master nodes [{}]", activeNodes);
+                logger.trace("not enough master nodes [{}]", masterCandidates);
                 return null;
             }
         } else {
-            assert !pingMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
+            assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
            // lets tie break between discovered nodes
-            return electMaster.electMaster(pingMasters);
+            return electMaster.tieBreakActiveMasters(activeMasters);
         }
     }
 
-    static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
+    static List<ZenPing.PingResponse> filterPingResponses(List<ZenPing.PingResponse> fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
         List<ZenPing.PingResponse> pingResponses;
         if (masterElectionIgnoreNonMasters) {
-            pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
+            pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
        } else {
-            pingResponses = Arrays.asList(fullPingResponses);
+            pingResponses = fullPingResponses;
        }
 
        if (logger.isDebugEnabled()) {
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.discovery.zen.ping;
 
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
 
 /**

@@ -26,7 +27,7 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
  */
 public interface PingContextProvider extends DiscoveryNodesProvider {
 
-    /** return true if this node has previously joined the cluster at least once. False if this is first join */
-    boolean nodeHasJoinedClusterOnce();
+    /** return the current cluster state of the node */
+    ClusterState clusterState();
 
 }
@@ -20,30 +20,42 @@
 package org.elasticsearch.discovery.zen.ping;
 
 import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.ElectMasterService;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
+import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
+
 public interface ZenPing extends LifecycleComponent {
 
     void setPingContextProvider(PingContextProvider contextProvider);
 
     void ping(PingListener listener, TimeValue timeout);
 
-    public interface PingListener {
+    interface PingListener {
 
-        void onPing(PingResponse[] pings);
+        /**
+         * called when pinging is done.
+         *
+         * @param pings ping result *must
+         */
+        void onPing(Collection<PingResponse> pings);
     }
 
-    public static class PingResponse implements Streamable {
+    class PingResponse implements Streamable {
 
         public static final PingResponse[] EMPTY = new PingResponse[0];

@@ -59,29 +71,36 @@ public interface ZenPing extends LifecycleComponent {
 
         private DiscoveryNode master;
 
-        private boolean hasJoinedOnce;
+        private long clusterStateVersion;
 
         private PingResponse() {
         }
 
         /**
-         * @param node          the node which this ping describes
-         * @param master        the current master of the node
-         * @param clusterName   the cluster name of the node
-         * @param hasJoinedOnce true if the joined has successfully joined the cluster before
+         * @param node                the node which this ping describes
+         * @param master              the current master of the node
+         * @param clusterName         the cluster name of the node
+         * @param clusterStateVersion the current cluster state version of that node
+         *                            ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} for not recovered)
         */
-        public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, boolean hasJoinedOnce) {
+        public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) {
             this.id = idGenerator.incrementAndGet();
             this.node = node;
             this.master = master;
             this.clusterName = clusterName;
-            this.hasJoinedOnce = hasJoinedOnce;
+            this.clusterStateVersion = clusterStateVersion;
         }
 
+        public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterState state) {
+            this(node, master, state.getClusterName(),
+                state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ?
+                    ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION : state.version());
+        }
+
         /**
          * an always increasing unique identifier for this ping response.
          * lower values means older pings.
         */
         public long id() {
             return this.id;
         }

@@ -100,9 +119,11 @@ public interface ZenPing extends LifecycleComponent {
             return master;
         }
 
-        /** true if the joined has successfully joined the cluster before */
-        public boolean hasJoinedOnce() {
-            return hasJoinedOnce;
+        /**
+         * the current cluster state version of that node ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION}
+         * for not recovered) */
+        public long getClusterStateVersion() {
+            return clusterStateVersion;
         }
 
         public static PingResponse readPingResponse(StreamInput in) throws IOException {

@@ -118,7 +139,7 @@ public interface ZenPing extends LifecycleComponent {
             if (in.readBoolean()) {
                 master = new DiscoveryNode(in);
             }
-            this.hasJoinedOnce = in.readBoolean();
+            this.clusterStateVersion = in.readLong();
             this.id = in.readLong();
         }

@@ -132,13 +153,14 @@ public interface ZenPing extends LifecycleComponent {
                 out.writeBoolean(true);
                 master.writeTo(out);
             }
-            out.writeBoolean(hasJoinedOnce);
+            out.writeLong(clusterStateVersion);
             out.writeLong(id);
         }
 
         @Override
         public String toString() {
-            return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], hasJoinedOnce [" + hasJoinedOnce + "], cluster_name[" + clusterName.value() + "]}";
+            return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], cluster_state_version [" + clusterStateVersion
+                + "], cluster_name[" + clusterName.value() + "]}";
         }
     }

@@ -146,7 +168,7 @@ public interface ZenPing extends LifecycleComponent {
     /**
      * a utility collection of pings where only the most recent ping is stored per node
      */
-    public static class PingCollection {
+    class PingCollection {
 
         Map<DiscoveryNode, PingResponse> pings;

@@ -171,15 +193,15 @@ public interface ZenPing extends LifecycleComponent {
         }
 
         /** adds multiple pings if newer than previous pings from the same node */
-        public synchronized void addPings(PingResponse[] pings) {
+        public synchronized void addPings(Iterable<PingResponse> pings) {
             for (PingResponse ping : pings) {
                 addPing(ping);
             }
         }
 
-        /** serialize current pings to an array */
-        public synchronized PingResponse[] toArray() {
-            return pings.values().toArray(new PingResponse[pings.size()]);
+        /** serialize current pings to a list. It is guaranteed that the list contains one ping response per node */
+        public synchronized List<PingResponse> toList() {
+            return new ArrayList<>(pings.values());
         }
 
         /** the number of nodes for which there are known pings */
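`PingCollection` is a map keyed by node that keeps only the newest ping per node (ping ids are monotonically increasing), which is why `toList()` can guarantee one response per node. A self-contained sketch of that idea, with a hypothetical `Ping` type standing in for `PingResponse`:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PingCollectionSketch {
    static final class Ping {
        final String nodeId;
        final long id; // always-increasing per response; higher means newer
        Ping(String nodeId, long id) { this.nodeId = nodeId; this.id = id; }
    }

    private final Map<String, Ping> pings = new HashMap<>();

    /** adds the ping if newer than a previous ping from the same node */
    synchronized boolean addPing(Ping ping) {
        Ping previous = pings.get(ping.nodeId);
        if (previous != null && ping.id <= previous.id) {
            return false; // stale: an equal-or-newer ping is already stored
        }
        pings.put(ping.nodeId, ping);
        return true;
    }

    /** exactly one (the most recent) ping per node */
    synchronized List<Ping> toList() {
        return new ArrayList<>(pings.values());
    }
}
```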
@@ -23,17 +23,15 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicBoolean;
 
-public class ZenPingService extends AbstractLifecycleComponent implements ZenPing {
+public class ZenPingService extends AbstractLifecycleComponent {
 
     private List<ZenPing> zenPings = Collections.emptyList();

@@ -47,7 +45,6 @@ public class ZenPingService extends AbstractLifecycleComponent {
         return this.zenPings;
     }
 
-    @Override
     public void setPingContextProvider(PingContextProvider contextProvider) {
         if (lifecycle.started()) {
             throw new IllegalStateException("Can't set nodes provider when started");

@@ -78,60 +75,31 @@ public class ZenPingService extends AbstractLifecycleComponent {
         }
     }
 
-    public PingResponse[] pingAndWait(TimeValue timeout) {
-        final AtomicReference<PingResponse[]> response = new AtomicReference<>();
-        final CountDownLatch latch = new CountDownLatch(1);
-        ping(new PingListener() {
-            @Override
-            public void onPing(PingResponse[] pings) {
-                response.set(pings);
-                latch.countDown();
-            }
-        }, timeout);
+    public ZenPing.PingCollection pingAndWait(TimeValue timeout) {
+        final ZenPing.PingCollection response = new ZenPing.PingCollection();
+        final CountDownLatch latch = new CountDownLatch(zenPings.size());
+        for (ZenPing zenPing : zenPings) {
+            final AtomicBoolean counted = new AtomicBoolean();
+            try {
+                zenPing.ping(pings -> {
+                    response.addPings(pings);
+                    if (counted.compareAndSet(false, true)) {
+                        latch.countDown();
+                    }
+                }, timeout);
+            } catch (Exception ex) {
+                logger.warn("Ping execution failed", ex);
+                if (counted.compareAndSet(false, true)) {
+                    latch.countDown();
+                }
+            }
+        }
         try {
             latch.await();
-            return response.get();
+            return response;
         } catch (InterruptedException e) {
             logger.trace("pingAndWait interrupted");
             return null;
         }
     }
 
-    @Override
-    public void ping(PingListener listener, TimeValue timeout) {
-        List<? extends ZenPing> zenPings = this.zenPings;
-        CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
-        for (ZenPing zenPing : zenPings) {
-            try {
-                zenPing.ping(compoundPingListener, timeout);
-            } catch (EsRejectedExecutionException ex) {
-                logger.debug("Ping execution rejected", ex);
-                compoundPingListener.onPing(null);
-            }
-        }
-    }
-
-    private static class CompoundPingListener implements PingListener {
-
-        private final PingListener listener;
-
-        private final AtomicInteger counter;
-
-        private PingCollection responses = new PingCollection();
-
-        private CompoundPingListener(PingListener listener, List<? extends ZenPing> zenPings) {
-            this.listener = listener;
this.counter = new AtomicInteger(zenPings.size());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPing(PingResponse[] pings) {
|
||||
if (pings != null) {
|
||||
responses.addPings(pings);
|
||||
}
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
listener.onPing(responses.toArray());
|
||||
}
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
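The latch arithmetic in the rewritten pingAndWait deserves a note: the CountDownLatch is sized to the number of ZenPing instances, and the per-instance AtomicBoolean guarantees each one decrements the latch exactly once, whether its listener fires, the ping call throws, or both. A self-contained hedged sketch of that idiom (all names illustrative, none from the commit):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

final class GuardedCountDown {
    // Waits until every pinger has reported back exactly once.
    static void awaitAll(List<Consumer<Runnable>> pingers) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(pingers.size());
        for (Consumer<Runnable> pinger : pingers) {
            AtomicBoolean counted = new AtomicBoolean();
            Runnable once = () -> {
                if (counted.compareAndSet(false, true)) {
                    latch.countDown();   // runs at most once per pinger
                }
            };
            try {
                pinger.accept(once);     // the pinger invokes the callback when it finishes
            } catch (Exception e) {
                once.run();              // a failed submission must still release the latch
            }
        }
        latch.await();
    }
}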

@@ -44,7 +44,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.threadpool.ThreadPool;

@@ -63,6 +63,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

@@ -236,8 +237,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing {
        temporalResponses.clear();
    }

    public PingResponse[] pingAndWait(TimeValue duration) {
        final AtomicReference<PingResponse[]> response = new AtomicReference<>();
    // test only
    Collection<PingResponse> pingAndWait(TimeValue duration) {
        final AtomicReference<Collection<PingResponse>> response = new AtomicReference<>();
        final CountDownLatch latch = new CountDownLatch(1);
        ping(pings -> {
            response.set(pings);

@@ -273,7 +275,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing {
        protected void doRun() throws Exception {
            sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler);
            sendPingsHandler.close();
            listener.onPing(sendPingsHandler.pingCollection().toArray());
            listener.onPing(sendPingsHandler.pingCollection().toList());
            for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {
                logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node);
                transportService.disconnectFromNode(node);

@@ -576,8 +578,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing {
    }

    private PingResponse createPingResponse(DiscoveryNodes discoNodes) {
        return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), clusterName,
            contextProvider.nodeHasJoinedClusterOnce());
        return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), contextProvider.clusterState());
    }

    static class UnicastPingResponse extends TransportResponse {

@@ -338,10 +338,6 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
                default:
                    throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]");
            }
            if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(),
                    "expected [END_OBJECT] but got [{}], possibly too many query clauses", parser.currentToken());
            }
        } else if (token == XContentParser.Token.START_ARRAY) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                switch (currentFieldName) {
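An illustration, not from the commit: the END_OBJECT guard removed from BoolQueryBuilder here resurfaces in generalized form in QueryParseContext below. What it rejects is a clause object carrying more than one query, an input that could previously be truncated to the first clause silently:

// Two queries inside a single "must" object: with the check in place this fails with
// "possibly too many query clauses" instead of the second clause being dropped.
String tooManyClauses =
    "{ \"bool\": { \"must\": { \"match_all\": {}, \"term\": { \"user\": \"kimchy\" } } } }";
// The correct spelling uses an array to hold several clauses.
String wellFormed =
    "{ \"bool\": { \"must\": [ { \"match_all\": {} }, { \"term\": { \"user\": \"kimchy\" } } ] } }";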

@@ -25,11 +25,9 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptSettings;

import java.io.IOException;
import java.util.Objects;

@@ -95,16 +93,12 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
     * Parses a query excluding the query element that wraps it
     */
    public Optional<QueryBuilder> parseInnerQueryBuilder() throws IOException {
        // move to START object
        XContentParser.Token token;
        if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
            token = parser.nextToken();
            if (token != XContentParser.Token.START_OBJECT) {
            if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, must start with start_object");
            }
        }
        token = parser.nextToken();
        if (token == XContentParser.Token.END_OBJECT) {
        if (parser.nextToken() == XContentParser.Token.END_OBJECT) {
            // we encountered '{}' for a query clause
            String msg = "query malformed, empty clause found at [" + parser.getTokenLocation() +"]";
            DEPRECATION_LOGGER.deprecated(msg);

@@ -113,23 +107,27 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
            }
            return Optional.empty();
        }
        if (token != XContentParser.Token.FIELD_NAME) {
        if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
            throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no field after start_object");
        }
        String queryName = parser.currentName();
        // move to the next START_OBJECT
        token = parser.nextToken();
        if (token != XContentParser.Token.START_OBJECT) {
        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
            throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name");
        }
        @SuppressWarnings("unchecked")
        Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,
            parser.getTokenLocation()).fromXContent(this);
        // end_object of the specific query (e.g. match, multi_match etc.) element
        if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
            throw new ParsingException(parser.getTokenLocation(),
                "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]");
        }
        parser.nextToken();
        // end_object of the query object
        if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            throw new ParsingException(parser.getTokenLocation(),
                "[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]");
        }
        return result;
    }
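A hedged sketch of driving this method; the QueryParseContext constructor arguments and the parser factory call are assumptions about the surrounding 5.x-era API, shown only to make the token walk above concrete:

import java.util.Optional;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;

// Token sequence for the input below: START_OBJECT, FIELD_NAME("match_all"), START_OBJECT,
// END_OBJECT (inner query), END_OBJECT (wrapper); exactly what the checks above enforce.
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser("{ \"match_all\": {} }");
// registry is an assumed, already-built IndicesQueriesRegistry.
QueryParseContext context = new QueryParseContext(registry, parser, ParseFieldMatcher.STRICT);
Optional<QueryBuilder> query = context.parseInnerQueryBuilder(); // a MatchAllQueryBuilder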

@@ -321,7 +321,7 @@ public class Node implements Closeable {
        }
        final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);
        modules.add(new NodeModule(this, monitorService));
        NetworkModule networkModule = new NetworkModule(networkService, settings, false);
        NetworkModule networkModule = createNetworkModule(settings, networkService);
        modules.add(networkModule);
        modules.add(new DiscoveryModule(this.settings));
        ClusterModule clusterModule = new ClusterModule(settings, clusterService,

@@ -417,6 +417,10 @@ public class Node implements Closeable {
        }
    }

    protected NetworkModule createNetworkModule(Settings settings, NetworkService networkService) {
        return new NetworkModule(networkService, settings, false);
    }
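A hedged sketch of the new extension point in use; the subclass is hypothetical, only createNetworkModule itself comes from this commit:

// A test-oriented node that customizes the NetworkModule before the module list is built.
public class MockNetworkNode extends Node {
    public MockNetworkNode(Settings settings) {
        super(settings);
    }

    @Override
    protected NetworkModule createNetworkModule(Settings settings, NetworkService networkService) {
        NetworkModule module = new NetworkModule(networkService, settings, false);
        // e.g. register an extra transport or a TransportInterceptor here before returning
        return module;
    }
}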

    /**
     * The settings that were used to create the node.
     */

@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport;

import org.elasticsearch.cluster.node.DiscoveryNode;

import java.util.function.Supplier;

/**
 * This interface allows plugins to intercept requests on both the sender and the receiver side.
 */
public interface TransportInterceptor {
    /**
     * This is called for each handler that is registered via
     * {@link TransportService#registerRequestHandler(String, Supplier, String, boolean, boolean, TransportRequestHandler)} or
     * {@link TransportService#registerRequestHandler(String, Supplier, String, TransportRequestHandler)}. The returned handler is
     * used instead of the passed-in handler. By default the provided handler is returned.
     */
    default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
                                                                                     TransportRequestHandler<T> actualHandler) {
        return actualHandler;
    }

    /**
     * This is called up-front, providing the low-level {@link AsyncSender} that performs the actual send.
     * The returned sender is used to send all requests that come in via
     * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportResponseHandler)} or
     * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}.
     * This allows plugins to perform actions on each send request, including modifying the request context etc.
     */
    default AsyncSender interceptSender(AsyncSender sender) {
        return sender;
    }

    /**
     * A simple interface to decorate
     * {@link #sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}
     */
    interface AsyncSender {
        <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
                                                       final TransportRequestOptions options, TransportResponseHandler<T> handler);
    }
}
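A hedged sketch of implementing the new interface; the class is illustrative and not part of the commit. It decorates only the outbound path and leaves interceptHandler at its default:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.cluster.node.DiscoveryNode;

// Counts outbound requests per action, then delegates to the real sender.
public final class CountingInterceptor implements TransportInterceptor {
    private final ConcurrentMap<String, AtomicLong> counts = new ConcurrentHashMap<>();

    @Override
    public AsyncSender interceptSender(AsyncSender sender) {
        return new AsyncSender() {
            @Override
            public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
                                                                  TransportRequestOptions options, TransportResponseHandler<T> handler) {
                counts.computeIfAbsent(action, a -> new AtomicLong()).incrementAndGet();
                sender.sendRequest(node, action, request, options, handler); // always delegate
            }
        };
    }

    public long countFor(String action) {
        AtomicLong count = counts.get(action);
        return count == null ? 0 : count.get();
    }
}

An instance can be handed to the new four-argument TransportService constructor introduced below, or registered via NetworkModule.addTransportInterceptor as the IndicesRequestIT changes later in this commit do.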

@@ -66,9 +66,6 @@ import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;

/**
 *
 */
public class TransportService extends AbstractLifecycleComponent {

    public static final String DIRECT_RESPONSE_PROFILE = ".direct";

@@ -79,16 +76,19 @@ public class TransportService extends AbstractLifecycleComponent {
    protected final ThreadPool threadPool;
    protected final ClusterName clusterName;
    protected final TaskManager taskManager;
    private final TransportInterceptor.AsyncSender asyncSender;

    volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
    final Object requestHandlerMutex = new Object();

    final ConcurrentMapLong<RequestHolder> clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();

    final AtomicLong requestIds = new AtomicLong();
    private final AtomicLong requestIds = new AtomicLong();

    final CopyOnWriteArrayList<TransportConnectionListener> connectionListeners = new CopyOnWriteArrayList<>();

    private final TransportInterceptor interceptor;

    // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they
    // do show up, we can print more descriptive information about them
    final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers =

@@ -101,6 +101,8 @@ public class TransportService extends AbstractLifecycleComponent {

    private final TransportService.Adapter adapter;

    public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {};

    // tracer log

    public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING =

@@ -118,7 +120,7 @@ public class TransportService extends AbstractLifecycleComponent {
    volatile DiscoveryNode localNode = null;

    @Inject
    public TransportService(Settings settings, Transport transport, ThreadPool threadPool) {
    public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor) {
        super(settings);
        this.transport = transport;
        this.threadPool = threadPool;

@@ -128,6 +130,8 @@ public class TransportService extends AbstractLifecycleComponent {
        tracerLog = Loggers.getLogger(logger, ".tracer");
        adapter = createAdapter();
        taskManager = createTaskManager();
        this.interceptor = transportInterceptor;
        this.asyncSender = interceptor.interceptSender(this::sendRequestInternal);
    }

    /**

@@ -241,11 +245,11 @@ public class TransportService extends AbstractLifecycleComponent {
     * when the transport layer starts up it will block any incoming requests until
     * this method is called
     */
    public void acceptIncomingRequests() {
    public final void acceptIncomingRequests() {
        blockIncomingRequestsLatch.countDown();
    }

    public boolean addressSupported(Class<? extends TransportAddress> address) {
    public final boolean addressSupported(Class<? extends TransportAddress> address) {
        return transport.addressSupported(address);
    }

@@ -442,13 +446,23 @@ public class TransportService extends AbstractLifecycleComponent {
        return futureHandler;
    }

    public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
                                                          final TransportResponseHandler<T> handler) {
    public final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
                                                                final TransportRequest request,
                                                                final TransportResponseHandler<T> handler) {
        sendRequest(node, action, request, TransportRequestOptions.EMPTY, handler);
    }

    public <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request,
                                                          final TransportRequestOptions options, TransportResponseHandler<T> handler) {
    public final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
                                                                final TransportRequest request,
                                                                final TransportRequestOptions options,
                                                                TransportResponseHandler<T> handler) {
        asyncSender.sendRequest(node, action, request, options, handler);
    }

    private <T extends TransportResponse> void sendRequestInternal(final DiscoveryNode node, final String action,
                                                                   final TransportRequest request,
                                                                   final TransportRequestOptions options,
                                                                   TransportResponseHandler<T> handler) {
        if (node == null) {
            throw new IllegalStateException("can't send request to a null node");
        }

@@ -594,8 +608,9 @@ public class TransportService extends AbstractLifecycleComponent {
     * @param executor The executor the request handling will be executed on
     * @param handler The handler itself that implements the request handling
     */
    public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory, String executor,
                                                                          TransportRequestHandler<Request> handler) {
    public final <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory,
                                                                                String executor, TransportRequestHandler<Request> handler) {
        handler = interceptor.interceptHandler(action, handler);
        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
            action, requestFactory, taskManager, handler, executor, false, true);
        registerRequestHandler(reg);

@@ -611,10 +626,11 @@ public class TransportService extends AbstractLifecycleComponent {
     * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached.
     * @param handler The handler itself that implements the request handling
     */
    public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request,
    public final <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request,
                                                                                String executor, boolean forceExecution,
                                                                                boolean canTripCircuitBreaker,
                                                                                TransportRequestHandler<Request> handler) {
        handler = interceptor.interceptHandler(action, handler);
        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
            action, request, taskManager, handler, executor, forceExecution, canTripCircuitBreaker);
        registerRequestHandler(reg);

@@ -744,12 +760,9 @@ public class TransportService extends AbstractLifecycleComponent {

    @Override
    public void raiseNodeConnected(final DiscoveryNode node) {
        threadPool.generic().execute(new Runnable() {
            @Override
            public void run() {
                for (TransportConnectionListener connectionListener : connectionListeners) {
                    connectionListener.onNodeConnected(node);
                }
        threadPool.generic().execute(() -> {
            for (TransportConnectionListener connectionListener : connectionListeners) {
                connectionListener.onNodeConnected(node);
            }
        });
    }

@@ -758,12 +771,7 @@ public class TransportService extends AbstractLifecycleComponent {
    public void raiseNodeDisconnected(final DiscoveryNode node) {
        try {
            for (final TransportConnectionListener connectionListener : connectionListeners) {
                threadPool.generic().execute(new Runnable() {
                    @Override
                    public void run() {
                        connectionListener.onNodeDisconnected(node);
                    }
                });
                threadPool.generic().execute(() -> connectionListener.onNodeDisconnected(node));
            }
            for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
                RequestHolder holder = entry.getValue();

@@ -772,12 +780,8 @@ public class TransportService extends AbstractLifecycleComponent {
                if (holderToNotify != null) {
                    // callback that an exception happened, but on a different thread since we don't
                    // want handlers to worry about stack overflows
                    threadPool.generic().execute(new Runnable() {
                        @Override
                        public void run() {
                            holderToNotify.handler().handleException(new NodeDisconnectedException(node, holderToNotify.action()));
                        }
                    });
                    threadPool.generic().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException(node,
                        holderToNotify.action())));
                }
            }
        }

@@ -1065,6 +1069,5 @@ public class TransportService extends AbstractLifecycleComponent {
        public String getChannelType() {
            return "direct";
        }

    }
}

@@ -78,11 +78,11 @@ import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;

@@ -91,12 +91,10 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.Before;

@@ -110,7 +108,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

@@ -143,8 +140,7 @@ public class IndicesRequestIT extends ESIntegTestCase {
        return Settings.builder().put(super.nodeSettings(ordinal))
            // InternalClusterInfoService sends IndicesStatsRequest periodically which messes with this test
            // this setting disables it...
            .put("cluster.routing.allocation.disk.threshold_enabled", false)
            .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build();
            .put("cluster.routing.allocation.disk.threshold_enabled", false).build();
    }

    @Override

@@ -701,31 +697,39 @@ public class IndicesRequestIT extends ESIntegTestCase {
    }

    private static void assertAllRequestsHaveBeenConsumed() {
        Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
        for (TransportService transportService : transportServices) {
            assertThat(((InterceptingTransportService)transportService).requests.entrySet(), emptyIterable());
        Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
        for (PluginsService pluginsService : pluginsServices) {
            Set<Map.Entry<String, List<TransportRequest>>> entries =
                pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
                    .instance.requests.entrySet();
            assertThat(entries, emptyIterable());

        }
    }

    private static void clearInterceptedActions() {
        Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
        for (TransportService transportService : transportServices) {
            ((InterceptingTransportService) transportService).clearInterceptedActions();
        Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
        for (PluginsService pluginsService : pluginsServices) {
            pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
                .instance.clearInterceptedActions();
        }
    }

    private static void interceptTransportActions(String... actions) {
        Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
        for (TransportService transportService : transportServices) {
            ((InterceptingTransportService) transportService).interceptTransportActions(actions);
        Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
        for (PluginsService pluginsService : pluginsServices) {
            pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class).stream().findFirst().get()
                .instance.interceptTransportActions(actions);
        }
    }

    private static List<TransportRequest> consumeTransportRequests(String action) {
        List<TransportRequest> requests = new ArrayList<>();
        Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
        for (TransportService transportService : transportServices) {
            List<TransportRequest> transportRequests = ((InterceptingTransportService) transportService).consumeRequests(action);

        Iterable<PluginsService> pluginsServices = internalCluster().getInstances(PluginsService.class);
        for (PluginsService pluginsService : pluginsServices) {
            List<TransportRequest> transportRequests = pluginsService.filterPlugins(InterceptingTransportService.TestPlugin.class)
                .stream().findFirst().get().instance.consumeRequests(action);
            if (transportRequests != null) {
                requests.addAll(transportRequests);
            }

@@ -733,12 +737,12 @@ public class IndicesRequestIT extends ESIntegTestCase {
        return requests;
    }

    public static class InterceptingTransportService extends TransportService {
    public static class InterceptingTransportService implements TransportInterceptor {

        public static class TestPlugin extends Plugin {

            public final InterceptingTransportService instance = new InterceptingTransportService();
            public void onModule(NetworkModule module) {
                module.registerTransportService("intercepting", InterceptingTransportService.class);
                module.addTransportInterceptor(instance);
            }
        }

@@ -746,9 +750,10 @@ public class IndicesRequestIT extends ESIntegTestCase {

        private final Map<String, List<TransportRequest>> requests = new HashMap<>();

        @Inject
        public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
            super(settings, transport, threadPool);
        @Override
        public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
                                                                                        TransportRequestHandler<T> actualHandler) {
            return new InterceptingRequestHandler<>(action, actualHandler);
        }

        synchronized List<TransportRequest> consumeRequests(String action) {

@@ -763,19 +768,6 @@ public class IndicesRequestIT extends ESIntegTestCase {
            actions.clear();
        }

        @Override
        public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request, String executor,
                                                                               boolean forceExecution, boolean canTripCircuitBreaker,
                                                                               TransportRequestHandler<Request> handler) {
            super.registerRequestHandler(action, request, executor, forceExecution, canTripCircuitBreaker, new
                InterceptingRequestHandler<>(action, handler));
        }

        @Override
        public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory, String
                executor, TransportRequestHandler<Request> handler) {
            super.registerRequestHandler(action, requestFactory, executor, new InterceptingRequestHandler<>(action, handler));
        }

        private class InterceptingRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> {

@@ -169,7 +169,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
        clusterService = createClusterService(threadPool);
        transportService = new TransportService(settings,
            new LocalTransport(settings, threadPool, new NamedWriteableRegistry(Collections.emptyList()),
                new NoneCircuitBreakerService()), threadPool) {
                new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
            @Override
            protected TaskManager createTaskManager() {
                if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) {

@@ -72,11 +72,11 @@ import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;

import static java.util.Collections.emptyList;
import static java.util.Collections.singleton;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

@@ -333,12 +333,11 @@ public class TasksIT extends ESIntegTestCase {
     * particular status results from indexing. For that, look at {@link TransportReplicationActionTests}. We intentionally don't use the
     * task recording mechanism used in other places in this test so we can make sure that the status fetching works properly over the wire.
     */
    public void testCanFetchIndexStatus() throws InterruptedException, ExecutionException, IOException {
        /* We make sure all indexing tasks wait to start before this lock is *unlocked* so we can fetch their status with both the get and
         * list APIs. */
    public void testCanFetchIndexStatus() throws Exception {
        // First latch waits for the task to start, second one blocks it from finishing.
        CountDownLatch taskRegistered = new CountDownLatch(1);
        CountDownLatch letTaskFinish = new CountDownLatch(1);
        ListenableActionFuture<IndexResponse> indexFuture = null;
        Thread index = null;
        try {
            for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
                ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {

@@ -348,7 +347,7 @@ public class TasksIT extends ESIntegTestCase {
                        taskRegistered.countDown();
                        logger.debug("Blocking [{}] starting", task);
                        try {
                            letTaskFinish.await(10, TimeUnit.SECONDS);
                            assertTrue(letTaskFinish.await(10, TimeUnit.SECONDS));
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }

@@ -364,8 +363,13 @@ public class TasksIT extends ESIntegTestCase {
                    }
                });
            }
            indexFuture = client().prepareIndex("test", "test").setSource("test", "test").execute();
            taskRegistered.await(10, TimeUnit.SECONDS); // waiting for at least one task to be registered
            // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener
            index = new Thread(() -> {
                IndexResponse indexResponse = client().prepareIndex("test", "test").setSource("test", "test").get();
                assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures());
            });
            index.start();
            assertTrue(taskRegistered.await(10, TimeUnit.SECONDS)); // waiting for at least one task to be registered

            ListTasksResponse listResponse = client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*")
                .setDetailed(true).get();

@@ -387,10 +391,13 @@ public class TasksIT extends ESIntegTestCase {
            }
        } finally {
            letTaskFinish.countDown();
            if (indexFuture != null) {
                IndexResponse indexResponse = indexFuture.get();
                assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures());
            if (index != null) {
                index.join();
            }
            assertBusy(() -> {
                assertEquals(emptyList(),
                    client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks());
            });
        }
    }

@@ -439,6 +446,9 @@ public class TasksIT extends ESIntegTestCase {
        }, response -> {
            assertThat(response.getNodeFailures(), empty());
            assertThat(response.getTaskFailures(), empty());
            assertThat(response.getTasks(), hasSize(1));
            TaskInfo task = response.getTasks().get(0);
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction());
        });
    }

@@ -446,10 +456,12 @@ public class TasksIT extends ESIntegTestCase {
        waitForCompletionTestCase(false, id -> {
            return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
        }, response -> {
            assertNotNull(response.getTask().getTask());
            assertTrue(response.getTask().isCompleted());
            // We didn't store the result so it won't come back when we wait
            assertNull(response.getTask().getResponse());
            // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete.
            assertNotNull(response.getTask().getTask());
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
        });
    }

@@ -457,10 +469,12 @@ public class TasksIT extends ESIntegTestCase {
        waitForCompletionTestCase(true, id -> {
            return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
        }, response -> {
            assertNotNull(response.getTask().getTask());
            assertTrue(response.getTask().isCompleted());
            // We stored the task so we should get its results
            assertEquals(0, response.getTask().getResponseAsMap().get("failure_count"));
            // The task's details should also be there
            assertNotNull(response.getTask().getTask());
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
        });
    }

@@ -490,6 +504,7 @@ public class TasksIT extends ESIntegTestCase {
            ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
                @Override
                public void waitForTaskCompletion(Task task) {
                    waitForWaitingToStart.countDown();
                }

                @Override

@@ -498,7 +513,6 @@ public class TasksIT extends ESIntegTestCase {

                @Override
                public void onTaskUnregistered(Task task) {
                    waitForWaitingToStart.countDown();
                }
            });
        }

@@ -506,7 +520,9 @@ public class TasksIT extends ESIntegTestCase {
        // Spin up a request to wait for the test task to finish
        waitResponseFuture = wait.apply(taskId);

        // Wait for the wait to start
        /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has got a
         * reference to the running task. Because we unblock immediately after this the task may no longer be running for us to wait
         * on, which is fine. */
        waitForWaitingToStart.await();
    } finally {
        // Unblock the request so the wait for completion request can finish

@@ -517,7 +533,8 @@ public class TasksIT extends ESIntegTestCase {
        T waitResponse = waitResponseFuture.get();
        validator.accept(waitResponse);

        future.get();
        TestTaskPlugin.NodesResponse response = future.get();
        assertEquals(emptyList(), response.failures());
    }

    public void testListTasksWaitForTimeout() throws Exception {

@@ -87,7 +87,8 @@ public class TransportBulkActionTookTests extends ESTestCase {

    private TransportBulkAction createAction(boolean controlled, AtomicLong expected) {
        CapturingTransport capturingTransport = new CapturingTransport();
        TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool);
        TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        transportService.start();
        transportService.acceptIncomingRequests();
        IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY);

@@ -33,29 +33,30 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import org.hamcrest.CustomTypeSafeMatcher;
import org.mockito.stubbing.Answer;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.same;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

@@ -67,7 +68,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
    private TransportService transportService;

    @SuppressWarnings("unchecked")
    private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes) {
    private IngestProxyActionFilter buildFilter(int ingestNodes, int totalNodes, TransportInterceptor interceptor) {
        ClusterState.Builder clusterState = new ClusterState.Builder(new ClusterName("_name"));
        DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder();
        DiscoveryNode localNode = null;

@@ -88,7 +89,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ClusterService clusterService = mock(ClusterService.class);
        when(clusterService.localNode()).thenReturn(localNode);
        when(clusterService.state()).thenReturn(clusterState.build());
        transportService = mock(TransportService.class);
        transportService = new TransportService(Settings.EMPTY, null, null, interceptor);
        return new IngestProxyActionFilter(clusterService, transportService);
    }

@@ -97,7 +98,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionListener actionListener = mock(ActionListener.class);
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        int totalNodes = randomIntBetween(1, 5);
        IngestProxyActionFilter filter = buildFilter(0, totalNodes);
        IngestProxyActionFilter filter = buildFilter(0, totalNodes, TransportService.NOOP_TRANSPORT_INTERCEPTOR);

        String action;
        ActionRequest request;

@@ -114,7 +115,6 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        } catch(IllegalStateException e) {
            assertThat(e.getMessage(), equalTo("There are no ingest nodes in this cluster, unable to forward request to an ingest node."));
        }
        verifyZeroInteractions(transportService);
        verifyZeroInteractions(actionFilterChain);
        verifyZeroInteractions(actionListener);
    }

@@ -124,7 +124,8 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionListener actionListener = mock(ActionListener.class);
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        int totalNodes = randomIntBetween(1, 5);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);

        String action;
        ActionRequest request;

@@ -136,7 +137,6 @@ public class IngestProxyActionFilterTests extends ESTestCase {
            request = new BulkRequest().add(new IndexRequest());
        }
        filter.apply(task, action, request, actionListener, actionFilterChain);
        verifyZeroInteractions(transportService);
        verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener));
        verifyZeroInteractions(actionListener);
    }

@@ -147,11 +147,11 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        ActionRequest request = mock(ActionRequest.class);
        int totalNodes = randomIntBetween(1, 5);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(0, totalNodes - 1), totalNodes,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);

        String action = randomAsciiOfLengthBetween(1, 20);
        filter.apply(task, action, request, actionListener, actionFilterChain);
        verifyZeroInteractions(transportService);
        verify(actionFilterChain).proceed(any(Task.class), eq(action), same(request), same(actionListener));
        verifyZeroInteractions(actionListener);
    }

@@ -162,19 +162,31 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionListener actionListener = mock(ActionListener.class);
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        int totalNodes = randomIntBetween(2, 5);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
        Answer<Void> answer = invocationOnMock -> {
            TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
            transportResponseHandler.handleResponse(new IndexResponse());
            return null;
        };
        doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));
        AtomicBoolean run = new AtomicBoolean(false);

        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
            new TransportInterceptor() {
                @Override
                public AsyncSender interceptSender(AsyncSender sender) {
                    return new AsyncSender() {
                        @Override
                        public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
                                                                              TransportRequestOptions options,
                                                                              TransportResponseHandler<T> handler) {
                            assertTrue(run.compareAndSet(false, true));
                            assertTrue(node.isIngestNode());
                            assertEquals(action, IndexAction.NAME);
                            handler.handleResponse((T) new IndexResponse());
                        }
                    };
                }
            });

        IndexRequest indexRequest = new IndexRequest().setPipeline("_id");
        filter.apply(task, IndexAction.NAME, indexRequest, actionListener, actionFilterChain);

        verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(IndexAction.NAME), same(indexRequest), any(TransportResponseHandler.class));
        verifyZeroInteractions(actionFilterChain);
        assertTrue(run.get());
        verify(actionListener).onResponse(any(IndexResponse.class));
        verify(actionListener, never()).onFailure(any(TransportException.class));
    }

@@ -185,13 +197,24 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionListener actionListener = mock(ActionListener.class);
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        int totalNodes = randomIntBetween(2, 5);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
        Answer<Void> answer = invocationOnMock -> {
            TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
            transportResponseHandler.handleResponse(new BulkResponse(null, -1));
            return null;
        };
        doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));
        AtomicBoolean run = new AtomicBoolean(false);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
            new TransportInterceptor() {
                @Override
                public AsyncSender interceptSender(AsyncSender sender) {
                    return new AsyncSender() {
                        @Override
                        public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
                                                                              TransportRequestOptions options,
                                                                              TransportResponseHandler<T> handler) {
                            assertTrue(run.compareAndSet(false, true));
                            assertTrue(node.isIngestNode());
                            assertEquals(action, BulkAction.NAME);
                            handler.handleResponse((T) new BulkResponse(null, -1));
                        }
                    };
                }
            });

        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest().setPipeline("_id"));

@@ -200,11 +223,10 @@ public class IngestProxyActionFilterTests extends ESTestCase {
            bulkRequest.add(new IndexRequest());
        }
        filter.apply(task, BulkAction.NAME, bulkRequest, actionListener, actionFilterChain);

        verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(BulkAction.NAME), same(bulkRequest), any(TransportResponseHandler.class));
        verifyZeroInteractions(actionFilterChain);
        verify(actionListener).onResponse(any(BulkResponse.class));
        verify(actionListener, never()).onFailure(any(TransportException.class));
        assertTrue(run.get());
    }

    @SuppressWarnings("unchecked")

@@ -213,30 +235,39 @@ public class IngestProxyActionFilterTests extends ESTestCase {
        ActionListener actionListener = mock(ActionListener.class);
        ActionFilterChain actionFilterChain = mock(ActionFilterChain.class);
        int totalNodes = randomIntBetween(2, 5);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes);
        Answer<Void> answer = invocationOnMock -> {
            TransportResponseHandler transportResponseHandler = (TransportResponseHandler) invocationOnMock.getArguments()[3];
            transportResponseHandler.handleException(new TransportException(new IllegalArgumentException()));
            return null;
        };
        doAnswer(answer).when(transportService).sendRequest(any(DiscoveryNode.class), any(String.class), any(TransportRequest.class), any(TransportResponseHandler.class));

        String action;
        String requestAction;
        ActionRequest request;
        if (randomBoolean()) {
            action = IndexAction.NAME;
            requestAction = IndexAction.NAME;
            request = new IndexRequest().setPipeline("_id");
        } else {
            action = BulkAction.NAME;
            requestAction = BulkAction.NAME;
            request = new BulkRequest().add(new IndexRequest().setPipeline("_id"));
        }

        filter.apply(task, action, request, actionListener, actionFilterChain);

        verify(transportService).sendRequest(argThat(new IngestNodeMatcher()), eq(action), same(request), any(TransportResponseHandler.class));
        AtomicBoolean run = new AtomicBoolean(false);
        IngestProxyActionFilter filter = buildFilter(randomIntBetween(1, totalNodes - 1), totalNodes,
            new TransportInterceptor() {
                @Override
                public AsyncSender interceptSender(AsyncSender sender) {
                    return new AsyncSender() {
                        @Override
                        public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
                                                                              TransportRequestOptions options,
                                                                              TransportResponseHandler<T> handler) {
                            assertTrue(run.compareAndSet(false, true));
                            assertTrue(node.isIngestNode());
                            assertEquals(action, requestAction);
                            handler.handleException(new TransportException(new IllegalArgumentException()));
                        }
                    };
                }
            });
        filter.apply(task, requestAction, request, actionListener, actionFilterChain);
        verifyZeroInteractions(actionFilterChain);
        verify(actionListener).onFailure(any(TransportException.class));
        verify(actionListener, never()).onResponse(any(TransportResponse.class));
        assertTrue(run.get());

    }

    private static class IngestNodeMatcher extends CustomTypeSafeMatcher<DiscoveryNode> {
@ -30,7 +30,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
|||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
|
@ -42,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
@ -56,11 +54,12 @@ public class MainActionTests extends ESTestCase {
|
|||
public void testMainResponseSerialization() throws IOException {
|
||||
final String nodeName = "node1";
|
||||
final ClusterName clusterName = new ClusterName("cluster1");
|
||||
final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
|
||||
final boolean available = randomBoolean();
|
||||
final Version version = Version.CURRENT;
|
||||
final Build build = Build.CURRENT;
|
||||
|
||||
final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
|
||||
final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
|
||||
BytesStreamOutput streamOutput = new BytesStreamOutput();
|
||||
mainResponse.writeTo(streamOutput);
|
||||
final MainResponse serialized = new MainResponse();
|
||||
|
@ -74,11 +73,21 @@ public class MainActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testMainResponseXContent() throws IOException {
|
||||
final MainResponse mainResponse = new MainResponse("node1", Version.CURRENT, new ClusterName("cluster1"), Build.CURRENT, false);
|
||||
final String expected = "{\"name\":\"node1\",\"cluster_name\":\"cluster1\",\"version\":{\"number\":\"" + Version.CURRENT.toString()
|
||||
+ "\",\"build_hash\":\"" + Build.CURRENT.shortHash() + "\",\"build_date\":\"" + Build.CURRENT.date() + "\"," +
|
||||
"\"build_snapshot\":" + Build.CURRENT.isSnapshot() + ",\"lucene_version\":\"" + Version.CURRENT.luceneVersion.toString() +
|
||||
"\"},\"tagline\":\"You Know, for Search\"}";
|
||||
String clusterUUID = randomAsciiOfLengthBetween(10, 20);
|
||||
final MainResponse mainResponse = new MainResponse("node1", Version.CURRENT, new ClusterName("cluster1"), clusterUUID,
|
||||
Build.CURRENT, false);
|
||||
final String expected = "{" +
|
||||
"\"name\":\"node1\"," +
|
||||
"\"cluster_name\":\"cluster1\"," +
|
||||
"\"cluster_uuid\":\"" + clusterUUID + "\"," +
|
||||
"\"version\":{" +
|
||||
"\"number\":\"" + Version.CURRENT.toString() + "\"," +
|
||||
"\"build_hash\":\"" + Build.CURRENT.shortHash() + "\"," +
|
||||
"\"build_date\":\"" + Build.CURRENT.date() + "\"," +
|
||||
"\"build_snapshot\":" + Build.CURRENT.isSnapshot() +
|
||||
",\"lucene_version\":\"" + Version.CURRENT.luceneVersion.toString() +
|
||||
"\"}," +
|
||||
"\"tagline\":\"You Know, for Search\"}";
|
||||
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
mainResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
|
@ -111,7 +120,8 @@ public class MainActionTests extends ESTestCase {
|
|||
ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build();
|
||||
when(clusterService.state()).thenReturn(state);
|
||||
|
||||
TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), mock(TransportService.class),
|
||||
TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), new TransportService(Settings.EMPTY,
|
||||
null ,null, TransportService.NOOP_TRANSPORT_INTERCEPTOR),
|
||||
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService);
|
||||
AtomicReference<MainResponse> responseRef = new AtomicReference<>();
|
||||
action.doExecute(new MainRequest(), new ActionListener<MainResponse>() {
|
||||
|
|
|
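Note: the serialization test above relies on a write/read round trip through the stream abstractions it imports. A minimal sketch of that idiom, assuming a Streamable-style MainResponse with an empty constructor as in the diff; the exact byte-extraction call may differ between versions:

    BytesStreamOutput out = new BytesStreamOutput();
    mainResponse.writeTo(out);                 // serialize to the transport wire format
    MainResponse copy = new MainResponse();    // empty instance to deserialize into
    copy.readFrom(out.bytes().streamInput());  // read back from the written bytes
    // ...then assert the copied fields (clusterUUID included) equal the originals.
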
@@ -57,8 +57,12 @@ public class TransportMultiSearchActionTests extends ESTestCase {
when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
ThreadPool threadPool = new ThreadPool(settings);
TaskManager taskManager = mock(TaskManager.class);
TransportService transportService = mock(TransportService.class);
when(transportService.getTaskManager()).thenReturn(taskManager);
TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
@Override
public TaskManager getTaskManager() {
return taskManager;
}
};
ClusterService clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);

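Note: this hunk swaps a Mockito mock of TransportService for a real but inert instance, overriding only the hook the test needs. A sketch of that pattern; the null transport and thread-pool arguments are tolerable here only because the service is never started:

    TaskManager taskManager = mock(TaskManager.class);
    TransportService transportService =
            new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR) {
                @Override
                public TaskManager getTaskManager() {
                    return taskManager; // the only behavior this test depends on
                }
            };
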
@@ -191,7 +191,8 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
setClusterState(clusterService, TEST_INDEX);

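Note: the same mechanical change recurs across the test setUps below — TransportService now requires a TransportInterceptor argument, and tests that do not care about interception pass TransportService.NOOP_TRANSPORT_INTERCEPTOR. Judging from `new TransportInterceptor() {}` in NetworkModuleTests later in this diff, the interface's methods have default pass-through implementations, which is why a no-op constant suffices. A plausible shape of the interface, as a sketch rather than the verbatim source (AsyncSender is the interface's nested abstraction over outbound sends, visible in the hunks below):

    public interface TransportInterceptor {
        default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
                String action, TransportRequestHandler<T> actualHandler) {
            return actualHandler; // default: leave inbound handlers unwrapped
        }

        default AsyncSender interceptSender(AsyncSender sender) {
            return sender;        // default: leave the outbound sender unwrapped
        }
    }
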
@@ -23,7 +23,7 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

@@ -87,7 +87,7 @@ public class TransportMasterNodeActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),

@@ -177,7 +177,8 @@ public class TransportNodesActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
int numNodes = randomIntBetween(3, 10);

@@ -18,7 +18,6 @@
 */
package org.elasticsearch.action.support.replication;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ShardOperationFailedException;

@@ -92,7 +91,7 @@ public class BroadcastReplicationTests extends ESTestCase {
super.setUp();
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(Collections.emptyList()), circuitBreakerService);
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), null);

@@ -149,7 +149,8 @@ public class TransportReplicationActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool);

@@ -130,7 +130,8 @@ public class TransportWriteActionTests extends ESTestCase {

private class TestAction extends TransportWriteAction<TestRequest, TestResponse> {
protected TestAction() {
super(Settings.EMPTY, "test", mock(TransportService.class), null, null, null, null, new ActionFilters(new HashSet<>()),
super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR),
null, null, null, null, new ActionFilters(new HashSet<>()),
new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME);
}

@@ -142,7 +142,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
action = new TestTransportInstanceSingleOperationAction(

@@ -73,8 +73,9 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
protected ThreadPool threadPool;
private Client client;

@Before
public void initClient() {
@Override
public void setUp() throws Exception {
super.setUp();
Settings settings = Settings.builder()
.put(HEADER_SETTINGS)
.put("path.home", createTempDir().toString())

@@ -85,8 +86,10 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
client = buildClient(settings, ACTIONS);
}

@After
public void cleanupClient() throws Exception {

@Override
public void tearDown() throws Exception {
super.tearDown();
client.close();
terminate(threadPool);
}

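Note: this hunk moves from JUnit's @Before/@After annotations to overriding ESTestCase's setUp/tearDown and chaining to super, which makes ordering relative to the framework explicit. A sketch of the idiom with illustrative field names:

    public class MyActionTests extends ESTestCase {
        private ThreadPool threadPool;

        @Override
        public void setUp() throws Exception {
            super.setUp();                            // framework initialization first
            threadPool = new TestThreadPool(getTestName());
        }

        @Override
        public void tearDown() throws Exception {
            super.tearDown();                         // framework teardown, then our cleanup
            terminate(threadPool);
        }
    }
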
@@ -31,48 +31,56 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.MockTransportClient;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

/**
 *
 */
public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {

private static final LocalTransportAddress address = new LocalTransportAddress("test");
private MockTransportService transportService;

@Override
public void tearDown() throws Exception {
super.tearDown();
transportService.stop();
transportService.close();
}

@Override
protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) {
transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
TransportClient client = new MockTransportClient(Settings.builder()
.put("client.transport.sniff", false)
.put("cluster.name", "cluster1")
.put("node.name", "transport_client_" + this.getTestName())
.put(headersSettings)
.build(), InternalTransportService.TestPlugin.class);

client.addTransportAddress(address);
.build(), InternalTransportServiceInterceptor.TestPlugin.class);
InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class)
.filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get();
plugin.instance.threadPool = client.threadPool();
plugin.instance.address = transportService.boundAddress().publishAddress();
client.addTransportAddress(transportService.boundAddress().publishAddress());
return client;
}

@@ -85,72 +93,77 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
.put("client.transport.nodes_sampler_interval", "1s")
.put(HEADER_SETTINGS)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(),
InternalTransportService.TestPlugin.class)) {
client.addTransportAddress(address);
InternalTransportServiceInterceptor.TestPlugin.class)) {
InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class)
.filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get();
plugin.instance.threadPool = client.threadPool();
plugin.instance.address = transportService.boundAddress().publishAddress();
client.addTransportAddress(transportService.boundAddress().publishAddress());

InternalTransportService service = (InternalTransportService) client.injector.getInstance(TransportService.class);

if (!service.clusterStateLatch.await(5, TimeUnit.SECONDS)) {
if (!plugin.instance.clusterStateLatch.await(5, TimeUnit.SECONDS)) {
fail("takes way too long to get the cluster state");
}

assertThat(client.connectedNodes().size(), is(1));
assertThat(client.connectedNodes().get(0).getAddress(), is((TransportAddress) address));
assertThat(client.connectedNodes().get(0).getAddress(), is(transportService.boundAddress().publishAddress()));
}
}

public static class InternalTransportService extends TransportService {
public static class InternalTransportServiceInterceptor implements TransportInterceptor {

ThreadPool threadPool;
TransportAddress address;


public static class TestPlugin extends Plugin {
private InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor();

public void onModule(NetworkModule transportModule) {
transportModule.registerTransportService("internal", InternalTransportService.class);
}
@Override
public Settings additionalSettings() {
return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build();
transportModule.addTransportInterceptor(new TransportInterceptor() {
@Override
public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action,
TransportRequestHandler<T> actualHandler) {
return instance.interceptHandler(action, actualHandler);
}

@Override
public AsyncSender interceptSender(AsyncSender sender) {
return instance.interceptSender(sender);
}
});
}
}

CountDownLatch clusterStateLatch = new CountDownLatch(1);

@Inject
public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
super(settings, transport, threadPool);
}

@Override @SuppressWarnings("unchecked")
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
TransportRequestOptions options, TransportResponseHandler<T> handler) {
if (TransportLivenessAction.NAME.equals(action)) {
assertHeaders(threadPool);
((TransportResponseHandler<LivenessResponse>) handler).handleResponse(new LivenessResponse(clusterName, node));
return;
}
if (ClusterStateAction.NAME.equals(action)) {
assertHeaders(threadPool);
ClusterName cluster1 = new ClusterName("cluster1");
ClusterState.Builder builder = ClusterState.builder(cluster1);
//the sniffer detects only data nodes
builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)));
((TransportResponseHandler<ClusterStateResponse>) handler)
.handleResponse(new ClusterStateResponse(cluster1, builder.build()));
clusterStateLatch.countDown();
return;
}

handler.handleException(new TransportException("", new InternalException(action)));
}
final CountDownLatch clusterStateLatch = new CountDownLatch(1);

@Override
public boolean nodeConnected(DiscoveryNode node) {
assertThat(node.getAddress(), equalTo(address));
return true;
}
public AsyncSender interceptSender(AsyncSender sender) {
return new AsyncSender() {
@Override
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
TransportRequestOptions options, TransportResponseHandler<T> handler) {
if (TransportLivenessAction.NAME.equals(action)) {
assertHeaders(threadPool);
((TransportResponseHandler<LivenessResponse>) handler).handleResponse(
new LivenessResponse(new ClusterName("cluster1"), node));
return;
}
if (ClusterStateAction.NAME.equals(action)) {
assertHeaders(threadPool);
ClusterName cluster1 = new ClusterName("cluster1");
ClusterState.Builder builder = ClusterState.builder(cluster1);
//the sniffer detects only data nodes
builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)));
((TransportResponseHandler<ClusterStateResponse>) handler)
.handleResponse(new ClusterStateResponse(cluster1, builder.build()));
clusterStateLatch.countDown();
return;
}

@Override
public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
assertThat(node.getAddress(), equalTo(address));
handler.handleException(new TransportException("", new InternalException(action)));
}
};
}
}
}

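Note: the file above shows the new extension point in action — instead of registering a custom TransportService subclass, a test plugin registers a TransportInterceptor with the NetworkModule and keeps a mutable instance it can reach into after the client is built. A reduced sketch of that registration pattern; names mirror the test, but the delegation shape is ours:

    public static class InterceptorPlugin extends Plugin {
        // Mutable holder the test fetches later via PluginsService#filterPlugins.
        final InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor();

        public void onModule(NetworkModule networkModule) {
            networkModule.addTransportInterceptor(new TransportInterceptor() {
                @Override
                public AsyncSender interceptSender(AsyncSender sender) {
                    return instance.interceptSender(sender); // delegate to the swappable instance
                }
            });
        }
    }
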
@@ -29,12 +29,12 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.Closeable;

@@ -76,28 +76,22 @@ public class TransportClientNodesServiceTests extends ESTestCase {
return new TestResponse();
}
};
transportService = new TransportService(settings, transport, threadPool) {
transportService = new TransportService(settings, transport, threadPool, new TransportInterceptor() {
@Override
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action,
TransportRequest request, final TransportResponseHandler<T> handler) {
if (TransportLivenessAction.NAME.equals(action)) {
super.sendRequest(node, action, request, wrapLivenessResponseHandler(handler, node, clusterName));
} else {
super.sendRequest(node, action, request, handler);
}
public AsyncSender interceptSender(AsyncSender sender) {
return new AsyncSender() {
@Override
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
TransportRequestOptions options, TransportResponseHandler<T> handler) {
if (TransportLivenessAction.NAME.equals(action)) {
sender.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName));
} else {
sender.sendRequest(node, action, request, options, handler);
}
}
};
}

@Override
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
TransportRequestOptions options,
TransportResponseHandler<T> handler) {
if (TransportLivenessAction.NAME.equals(action)) {
super.sendRequest(node, action, request, options, wrapLivenessResponseHandler(handler, node, clusterName));
} else {
super.sendRequest(node, action, request, options, handler);
}
}
};
});
transportService.start();
transportService.acceptIncomingRequests();
transportClientNodesService =

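Note: the hunk above is the clearest example of the sender-decorator idiom that replaces overriding TransportService#sendRequest — the interceptor returns an AsyncSender that filters on the action name and otherwise forwards untouched. Its skeleton, with "rewriteHandler" as a hypothetical hook standing in for wrapLivenessResponseHandler:

    TransportInterceptor interceptor = new TransportInterceptor() {
        @Override
        public AsyncSender interceptSender(AsyncSender sender) {
            return new AsyncSender() {
                @Override
                public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action,
                        TransportRequest request, TransportRequestOptions options,
                        TransportResponseHandler<T> handler) {
                    if (TransportLivenessAction.NAME.equals(action)) {
                        sender.sendRequest(node, action, request, options, rewriteHandler(handler)); // special-case
                    } else {
                        sender.sendRequest(node, action, request, options, handler);                 // pass through
                    }
                }
            };
        }
    };
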
@@ -29,8 +29,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

@@ -149,7 +149,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {
public void setUp() throws Exception {
super.setUp();
this.transport = new MockTransport();
transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL);
transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
}

@@ -106,7 +106,7 @@ public class ShardStateActionTests extends ESTestCase {
super.setUp();
this.transport = new CapturingTransport();
clusterService = createClusterService(THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null);

@@ -30,8 +30,8 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationD
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

@@ -92,7 +92,8 @@ public class ClusterStateHealthTests extends ESTestCase {
public void setUp() throws Exception {
super.setUp();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool);
transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
}

@@ -56,7 +56,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
}

public void testUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
src = service.upgradeIndexMetaData(src);

@@ -67,7 +68,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
}

public void testIsUpgraded() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion());

@@ -77,6 +79,26 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
assertTrue(service.isUpgraded(src));
}

public void testFailUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(),
Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
final IndexMetaData metaData = newIndexMeta("foo", Settings.builder()
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("1.7.0"))
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE,
Version.CURRENT.luceneVersion.toString()).build());
String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData)).getMessage();
assertEquals(message, "The index [[foo/BOOM]] was created before v2.0.0.beta1. It should be reindexed in Elasticsearch 2.x " +
"before upgrading to " + Version.CURRENT.toString() + ".");

IndexMetaData goodMeta = newIndexMeta("foo", Settings.builder()
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.1.0"))
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE,
Version.CURRENT.luceneVersion.toString()).build());
service.upgradeIndexMetaData(goodMeta);
}

public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

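Note: testFailUpgrade above leans on the expectThrows helper, which returns the thrown exception so the test can assert on its message. The same shape with a toy runnable standing in for upgradeIndexMetaData:

    IllegalStateException e = expectThrows(IllegalStateException.class, () -> {
        throw new IllegalStateException("too old to upgrade"); // stand-in for the real call
    });
    assertEquals("too old to upgrade", e.getMessage());
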
@@ -58,7 +58,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {

private static final GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory();

public void testParse_simplePoint() throws IOException {
public void testParseSimplePoint() throws IOException {
String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
.startArray("coordinates").value(100.0).value(0.0).endArray()
.endObject().string();

@@ -67,7 +67,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
}

public void testParse_lineString() throws IOException {
public void testParseLineString() throws IOException {
String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
.startArray("coordinates")
.startArray().value(100.0).value(0.0).endArray()

@@ -84,7 +84,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), lineGeoJson);
}

public void testParse_multiLineString() throws IOException {
public void testParseMultiLineString() throws IOException {
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiLineString")
.startArray("coordinates")
.startArray()

@@ -111,7 +111,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), multilinesGeoJson);
}

public void testParse_circle() throws IOException {
public void testParseCircle() throws IOException {
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "circle")
.startArray("coordinates").value(100.0).value(0.0).endArray()
.field("radius", "100m")

@@ -121,7 +121,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(expected, multilinesGeoJson);
}

public void testParse_multiDimensionShapes() throws IOException {
public void testParseMultiDimensionShapes() throws IOException {
// multi dimension point
String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
.startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray()

@@ -147,7 +147,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson);
}

public void testParse_envelope() throws IOException {
public void testParseEnvelope() throws IOException {
// test #1: envelope with expected coordinate order (TopLeft, BottomRight)
String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
.startArray("coordinates")

@@ -192,7 +192,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}

public void testParse_polygonNoHoles() throws IOException {
public void testParsePolygonNoHoles() throws IOException {
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
.startArray()

@@ -217,7 +217,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
}

public void testParse_invalidPoint() throws IOException {
public void testParseInvalidPoint() throws IOException {
// test case 1: create an invalid point object with multipoint data format
String invalidPoint1 = XContentFactory.jsonBuilder().startObject().field("type", "point")
.startArray("coordinates")

@@ -238,7 +238,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}

public void testParse_invalidMultipoint() throws IOException {
public void testParseInvalidMultipoint() throws IOException {
// test case 1: create an invalid multipoint object with single coordinate
String invalidMultipoint1 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint")
.startArray("coordinates").value(-74.011).value(40.753).endArray()

@@ -267,7 +267,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}

public void testParse_invalidMultiPolygon() throws IOException {
public void testParseInvalidMultiPolygon() throws IOException {
// test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring)
String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
.startArray("coordinates")

@@ -302,7 +302,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
}

public void testParse_OGCPolygonWithoutHoles() throws IOException {
public void testParseOGCPolygonWithoutHoles() throws IOException {
// test 1: ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")

@@ -384,7 +384,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertMultiPolygon(shape);
}

public void testParse_OGCPolygonWithHoles() throws IOException {
public void testParseOGCPolygonWithHoles() throws IOException {
// test 1: ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")

@@ -490,7 +490,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertMultiPolygon(shape);
}

public void testParse_invalidPolygon() throws IOException {
public void testParseInvalidPolygon() throws IOException {
/**
 * The following 3 test cases ensure proper error handling of invalid polygons
 * per the GeoJSON specification

@@ -579,7 +579,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
}

public void testParse_polygonWithHole() throws IOException {
public void testParsePolygonWithHole() throws IOException {
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")
.startArray()

@@ -623,7 +623,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
}

public void testParse_selfCrossingPolygon() throws IOException {
public void testParseSelfCrossingPolygon() throws IOException {
// test self crossing ccw poly not crossing dateline
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.startArray("coordinates")

@@ -644,7 +644,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
}

public void testParse_multiPoint() throws IOException {
public void testParseMultiPoint() throws IOException {
String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint")
.startArray("coordinates")
.startArray().value(100.0).value(0.0).endArray()

@@ -658,7 +658,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(expected, multiPointGeoJson);
}

public void testParse_multiPolygon() throws IOException {
public void testParseMultiPolygon() throws IOException {
// test #1: two polygons; one without hole, one with hole
String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
.startArray("coordinates")

@@ -770,7 +770,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson);
}

public void testParse_geometryCollection() throws IOException {
public void testParseGeometryCollection() throws IOException {
String geometryCollectionGeoJson = XContentFactory.jsonBuilder().startObject()
.field("type", "GeometryCollection")
.startArray("geometries")

@@ -822,7 +822,7 @@ public class GeoJSONShapeParserTests extends ESTestCase {
assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
}

public void testParse_orientationOption() throws IOException {
public void testParseOrientationOption() throws IOException {
// test 1: valid ccw (right handed system) poly not crossing dateline (with 'right' field)
String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
.field("orientation", "right")

@@ -35,18 +35,12 @@ import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.test.transport.AssertingLocalTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportInterceptor;

import java.util.Collections;

public class NetworkModuleTests extends ModuleTestCase {

static class FakeTransportService extends TransportService {
public FakeTransportService() {
super(null, null, null);
}
}

static class FakeTransport extends AssertingLocalTransport {
public FakeTransport() {
super(null, null, null, null);

@@ -101,23 +95,6 @@ public class NetworkModuleTests extends ModuleTestCase {
}
}

public void testRegisterTransportService() {
Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom")
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
.build();
NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
module.registerTransportService("custom", FakeTransportService.class);
assertBinding(module, TransportService.class, FakeTransportService.class);
assertFalse(module.isTransportClient());

// check it works with transport only as well
module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, true);
module.registerTransportService("custom", FakeTransportService.class);
assertBinding(module, TransportService.class, FakeTransportService.class);
assertTrue(module.isTransportClient());
}

public void testRegisterTransport() {
Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom")
.put(NetworkModule.HTTP_ENABLED.getKey(), false)

@@ -161,4 +138,27 @@ public class NetworkModuleTests extends ModuleTestCase {
assertNotBound(module, HttpServerTransport.class);
assertFalse(module.isTransportClient());
}

public void testRegisterInterceptor() {
Settings settings = Settings.builder()
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();

NetworkModule module = new NetworkModule(new NetworkService(settings, Collections.emptyList()), settings, false);
TransportInterceptor interceptor = new TransportInterceptor() {};
module.addTransportInterceptor(interceptor);
assertInstanceBinding(module, TransportInterceptor.class, i -> {
if (i instanceof NetworkModule.CompositeTransportInterceptor) {
assertEquals(((NetworkModule.CompositeTransportInterceptor)i).transportInterceptors.size(), 1);
return ((NetworkModule.CompositeTransportInterceptor)i).transportInterceptors.get(0) == interceptor;
}
return false;
});

NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> {
module.addTransportInterceptor(null);
});
assertEquals("interceptor must not be null", nullPointerException.getMessage());
}
}

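Note: the new test inspects a NetworkModule.CompositeTransportInterceptor holding every registered interceptor. A plausible shape for that composite, under the assumption that it simply folds interceptSender across its list — the real class lives in NetworkModule and may differ in detail:

    static final class CompositeTransportInterceptor implements TransportInterceptor {
        final java.util.List<TransportInterceptor> transportInterceptors;

        CompositeTransportInterceptor(java.util.List<TransportInterceptor> interceptors) {
            this.transportInterceptors = new java.util.ArrayList<>(interceptors);
        }

        @Override
        public AsyncSender interceptSender(AsyncSender sender) {
            AsyncSender wrapped = sender;
            for (TransportInterceptor interceptor : transportInterceptors) {
                wrapped = interceptor.interceptSender(wrapped); // each layer wraps the last
            }
            return wrapped;
        }
    }
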
@@ -18,13 +18,10 @@
 */
package org.elasticsearch.discovery;

import org.elasticsearch.Version;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.NoopDiscovery;

/**

@@ -49,8 +49,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.discovery.zen.ping.ZenPing;

@@ -82,6 +82,7 @@ import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;

@@ -109,9 +110,12 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;

@@ -163,7 +167,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

private List<String> startCluster(int numberOfNodes, int minimumMasterNode, @Nullable int[] unicastHostsOrdinals) throws
ExecutionException, InterruptedException {
configureUnicastCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
configureCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
ensureStableCluster(numberOfNodes);

@@ -183,6 +187,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // still long enough to induce failures, but not so long that the test times out
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
.put(TcpTransport.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
// value and the time of disruption and does not recover immediately
// when the disruption is stopped. We should make sure we recover faster
// than the default of 30s, which would cause ensureGreen and friends to time out
.build();

@Override

@@ -190,15 +199,15 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
return Arrays.asList(MockTransportService.TestPlugin.class);
}

private void configureUnicastCluster(
private void configureCluster(
int numberOfNodes,
@Nullable int[] unicastHostsOrdinals,
int minimumMasterNode
) throws ExecutionException, InterruptedException {
configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
configureCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
}

private void configureUnicastCluster(
private void configureCluster(
Settings settings,
int numberOfNodes,
@Nullable int[] unicastHostsOrdinals,

@@ -1025,7 +1034,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
}

public void testClusterFormingWithASlowNode() throws Exception {
configureUnicastCluster(3, null, 2);
configureCluster(3, null, 2);

SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000);

@@ -1088,7 +1097,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 */
public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
// don't use DEFAULT settings (which can cause node disconnects on a slow CI machine)
configureUnicastCluster(Settings.EMPTY, 3, null, 1);
configureCluster(Settings.EMPTY, 3, null, 1);
InternalTestCluster.Async<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync();
InternalTestCluster.Async<String> node_1Future = internalCluster().startDataOnlyNodeAsync();

@@ -1129,7 +1138,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception {
// test for https://github.com/elastic/elasticsearch/issues/8823
configureUnicastCluster(2, null, 1);
configureCluster(2, null, 1);
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY);

@@ -1160,7 +1169,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed
.build();
final String idxName = "test";
configureUnicastCluster(settings, 3, null, 2);
configureCluster(settings, 3, null, 2);
InternalTestCluster.Async<List<String>> masterNodes = internalCluster().startMasterOnlyNodesAsync(2);
InternalTestCluster.Async<String> dataNode = internalCluster().startDataOnlyNodeAsync();
dataNode.get();

@@ -1189,6 +1198,61 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
assertFalse(client().admin().indices().prepareExists(idxName).get().isExists());
}

public void testElectMasterWithLatestVersion() throws Exception {
configureCluster(3, null, 2);
final Set<String> nodes = new HashSet<>(internalCluster().startNodesAsync(3).get());
ensureStableCluster(3);
ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption(new NetworkDisruption.IsolateAllNodes(nodes), new NetworkDisconnect());
internalCluster().setDisruptionScheme(isolateAllNodes);

logger.info("--> forcing a complete election to make sure \"preferred\" master is elected");
isolateAllNodes.startDisrupting();
for (String node: nodes) {
assertNoMaster(node);
}
isolateAllNodes.stopDisrupting();
ensureStableCluster(3);
final String preferredMasterName = internalCluster().getMasterName();
final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode();
for (String node: nodes) {
DiscoveryNode discoveryNode = internalCluster().clusterService(node).localNode();
assertThat(discoveryNode.getId(), greaterThanOrEqualTo(preferredMaster.getId()));
}

logger.info("--> preferred master is {}", preferredMaster);
final Set<String> nonPreferredNodes = new HashSet<>(nodes);
nonPreferredNodes.remove(preferredMasterName);
final ServiceDisruptionScheme isolatePreferredMaster =
new NetworkDisruption(
new NetworkDisruption.TwoPartitions(
Collections.singleton(preferredMasterName), nonPreferredNodes),
new NetworkDisconnect());
internalCluster().setDisruptionScheme(isolatePreferredMaster);
isolatePreferredMaster.startDisrupting();

assertAcked(client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(
INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1,
INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0
));

internalCluster().clearDisruptionScheme(false);
internalCluster().setDisruptionScheme(isolateAllNodes);

logger.info("--> forcing a complete election again");
isolateAllNodes.startDisrupting();
for (String node: nodes) {
assertNoMaster(node);
}

isolateAllNodes.stopDisrupting();

final ClusterState state = client().admin().cluster().prepareState().get().getState();
if (state.metaData().hasIndex("test") == false) {
fail("index 'test' was lost. current cluster state: " + state.prettyPrint());
}
}

protected NetworkDisruption addRandomDisruptionType(TwoPartitions partitions) {
final NetworkLinkDisruptionType disruptionType;
if (randomBoolean()) {

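Note: the new testElectMasterWithLatestVersion above follows the standard disruption-testing recipe — build a topology (TwoPartitions or IsolateAllNodes), pair it with a link behavior (NetworkDisconnect), install it on the test cluster, and bracket assertions with start/stop. Distilled, using the test's own variables; the cluster handles come from ESIntegTestCase:

    NetworkDisruption partition = new NetworkDisruption(
            new NetworkDisruption.TwoPartitions(
                    Collections.singleton(preferredMasterName), nonPreferredNodes),
            new NetworkDisconnect());
    internalCluster().setDisruptionScheme(partition);
    partition.startDisrupting();   // requests across the partition now fail
    // ...assertions while the cluster is split...
    partition.stopDisrupting();    // heal the partition and let the cluster converge
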
@@ -147,7 +147,7 @@ public class ZenFaultDetectionTests extends ESTestCase {
return version;
}
},
threadPool);
threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;

@@ -23,7 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService.MasterCandidate;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;

@@ -31,6 +31,10 @@ import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

public class ElectMasterServiceTests extends ESTestCase {

@@ -55,6 +59,22 @@ public class ElectMasterServiceTests extends ESTestCase {
return nodes;
}

List<MasterCandidate> generateRandomCandidates() {
int count = scaledRandomIntBetween(1, 100);
ArrayList<MasterCandidate> candidates = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
Set<DiscoveryNode.Role> roles = new HashSet<>();
roles.add(DiscoveryNode.Role.MASTER);
DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
roles, Version.CURRENT);
candidates.add(new MasterCandidate(node, randomBoolean() ? MasterCandidate.UNRECOVERED_CLUSTER_VERSION : randomPositiveLong()));
}

Collections.shuffle(candidates, random());
return candidates;
}

public void testSortByMasterLikelihood() {
List<DiscoveryNode> nodes = generateRandomNodes();
List<DiscoveryNode> sortedNodes = electMasterService().sortByMasterLikelihood(nodes);

@@ -69,36 +89,53 @@ public class ElectMasterServiceTests extends ESTestCase {
}
prevNode = node;
}
}

public void testTieBreakActiveMasters() {
List<DiscoveryNode> nodes = generateRandomCandidates().stream().map(MasterCandidate::getNode).collect(Collectors.toList());
DiscoveryNode bestMaster = electMasterService().tieBreakActiveMasters(nodes);
for (DiscoveryNode node: nodes) {
if (node.equals(bestMaster) == false) {
assertTrue(bestMaster.getId().compareTo(node.getId()) < 0);
}
}
}

public void testHasEnoughNodes() {
List<DiscoveryNode> nodes = rarely() ? Collections.emptyList() : generateRandomNodes();
ElectMasterService service = electMasterService();
int masterNodes = (int) nodes.stream().filter(DiscoveryNode::isMasterNode).count();
service.minimumMasterNodes(randomIntBetween(-1, masterNodes));
assertThat(service.hasEnoughMasterNodes(nodes), equalTo(masterNodes > 0));
service.minimumMasterNodes(masterNodes + 1 + randomIntBetween(0, nodes.size()));
assertFalse(service.hasEnoughMasterNodes(nodes));
}

public void testHasEnoughCandidates() {
List<MasterCandidate> candidates = rarely() ? Collections.emptyList() : generateRandomCandidates();
ElectMasterService service = electMasterService();
service.minimumMasterNodes(randomIntBetween(-1, candidates.size()));
assertThat(service.hasEnoughCandidates(candidates), equalTo(candidates.size() > 0));
service.minimumMasterNodes(candidates.size() + 1 + randomIntBetween(0, candidates.size()));
assertFalse(service.hasEnoughCandidates(candidates));
}

public void testElectMaster() {
List<DiscoveryNode> nodes = generateRandomNodes();
List<MasterCandidate> candidates = generateRandomCandidates();
ElectMasterService service = electMasterService();
int min_master_nodes = randomIntBetween(0, nodes.size());
service.minimumMasterNodes(min_master_nodes);

int master_nodes = 0;
for (DiscoveryNode node : nodes) {
if (node.isMasterNode()) {
master_nodes++;
}
}
DiscoveryNode master = null;
if (service.hasEnoughMasterNodes(nodes)) {
master = service.electMaster(nodes);
}

if (master_nodes == 0) {
assertNull(master);
} else if (min_master_nodes > 0 && master_nodes < min_master_nodes) {
assertNull(master);
} else {
assertNotNull(master);
for (DiscoveryNode node : nodes) {
if (node.isMasterNode()) {
assertTrue(master.getId().compareTo(node.getId()) <= 0);
}
}
int minMasterNodes = randomIntBetween(0, candidates.size());
service.minimumMasterNodes(minMasterNodes);
MasterCandidate master = service.electMaster(candidates);
assertNotNull(master);
for (MasterCandidate candidate : candidates) {
if (candidate.getNode().equals(master.getNode())) {
// nothing much to test here
} else if (candidate.getClusterStateVersion() == master.getClusterStateVersion()) {
assertThat("candidate " + candidate + " has a lower or equal id than master " + master, candidate.getNode().getId(),
greaterThan(master.getNode().getId()));
} else {
assertThat("candidate " + master + " has a higher cluster state version than candidate " + candidate,
master.getClusterStateVersion(), greaterThan(candidate.getClusterStateVersion()));
}
}
}

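Note: the assertions in testElectMaster above pin down the new election ordering — a candidate with a higher cluster state version beats one with a lower version, and among equal versions the lexicographically smallest node id wins. A comparator sketch of that rule, not the verbatim MasterCandidate implementation; getClusterStateVersion and getNode().getId() are the accessors the test itself uses:

    Comparator<MasterCandidate> electionOrder =
            Comparator.comparingLong(MasterCandidate::getClusterStateVersion).reversed() // newer state first
                    .thenComparing(c -> c.getNode().getId());                            // then lowest id
    // electMaster(candidates) then behaves like picking the minimum under this order:
    MasterCandidate elected = candidates.stream().min(electionOrder).orElse(null);
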
@@ -43,7 +43,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.BaseFuture;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

@@ -28,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;

@ -34,14 +34,12 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.InetSocketTransportAddress;
|
||||
import org.elasticsearch.common.transport.LocalTransportAddress;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryStats;
|
||||
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.fd.FaultDetection;
|
||||
import org.elasticsearch.discovery.zen.membership.MembershipAction;
|
||||
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
|
||||
|
@ -60,10 +58,8 @@ import org.hamcrest.Matchers;
|
|||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
@ -77,8 +73,6 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.hamcrest.Matchers.sameInstance;
|
||||
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
|
||||
@ESIntegTestCase.SuppressLocalMode
|
||||
|
@@ -293,44 +287,6 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
        }
    }

    public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException {
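        // a join request from a node on an older, incompatible version must be rejected by the master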
        Settings nodeSettings = Settings.builder()
            .put("discovery.type", "zen") // <-- To override the local setting if set externally
            .build();
        String nodeName = internalCluster().startNode(nodeSettings);
        ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
        ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName);
        DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0),
            emptyMap(), emptySet(), previousMajorVersion);
        final AtomicReference<IllegalStateException> holder = new AtomicReference<>();
        zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() {
            @Override
            public void onSuccess() {
            }

            @Override
            public void onFailure(Exception e) {
                holder.set((IllegalStateException) e);
            }
        });

        assertThat(holder.get(), notNullValue());
        assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [" + previousMajorVersion
            + "] that is lower than the minimum compatible version [" + Version.CURRENT.minimumCompatibilityVersion() + "]"));
    }

    public void testJoinElectedMaster_incompatibleMinVersion() {
        ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY);

        DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(),
            Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
        assertThat(electMasterService.electMaster(Collections.singletonList(node)), sameInstance(node));
        node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(), emptySet(), previousMajorVersion);
        assertThat("Can't join master because version " + previousMajorVersion
            + " is lower than the minimum compatible version " + Version.CURRENT + " can support",
            electMasterService.electMaster(Collections.singletonList(node)), nullValue());
    }

    public void testDiscoveryStats() throws IOException {
        String expectedStatsJsonResponse = "{\n" +
            "  \"discovery\" : {\n" +
@@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener;

@@ -55,8 +54,8 @@ import java.util.stream.Collectors;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
import static org.elasticsearch.discovery.zen.elect.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.containsString;

@@ -128,7 +127,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
            Set<Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values())));
            DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
                roles, Version.CURRENT);
            responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean()));
            responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomLong()));
            allNodes.add(node);
            if (node.isMasterNode()) {
                masterNodes.add(node);

@@ -136,8 +135,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
        }

        boolean ignore = randomBoolean();
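        // when 'ignore' is true, filterPingResponses is expected to drop pings from nodes that are not master-eligible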
        List<ZenPing.PingResponse> filtered = ZenDiscovery.filterPingResponses(
            responses.toArray(new ZenPing.PingResponse[responses.size()]), ignore, logger);
        List<ZenPing.PingResponse> filtered = ZenDiscovery.filterPingResponses(responses, ignore, logger);
        final List<DiscoveryNode> filteredNodes = filtered.stream().map(ZenPing.PingResponse::node).collect(Collectors.toList());
        if (ignore) {
            assertThat(filteredNodes, equalTo(masterNodes));

@@ -29,6 +29,7 @@ import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;

@@ -39,7 +40,7 @@ public class ZenPingTests extends ESTestCase {
        DiscoveryNode[] nodes = new DiscoveryNode[randomIntBetween(1, 30)];
        long maxIdPerNode[] = new long[nodes.length];
        DiscoveryNode masterPerNode[] = new DiscoveryNode[nodes.length];
        boolean hasJoinedOncePerNode[] = new boolean[nodes.length];
        long clusterStateVersionPerNode[] = new long[nodes.length];
        ArrayList<ZenPing.PingResponse> pings = new ArrayList<>();
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = new DiscoveryNode("" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);

@@ -51,9 +52,9 @@ public class ZenPingTests extends ESTestCase {
            if (randomBoolean()) {
                masterNode = nodes[randomInt(nodes.length - 1)];
            }
            boolean hasJoinedOnce = randomBoolean();
            long clusterStateVersion = randomLong();
            ZenPing.PingResponse ping = new ZenPing.PingResponse(nodes[node], masterNode, ClusterName.CLUSTER_NAME_SETTING.
                getDefault(Settings.EMPTY), hasJoinedOnce);
                getDefault(Settings.EMPTY), clusterStateVersion);
            if (rarely()) {
                // ignore some pings
                continue;

@@ -61,7 +62,7 @@ public class ZenPingTests extends ESTestCase {
            // update max ping info
            maxIdPerNode[node] = ping.id();
            masterPerNode[node] = masterNode;
            hasJoinedOncePerNode[node] = hasJoinedOnce;
            clusterStateVersionPerNode[node] = clusterStateVersion;
            pings.add(ping);
        }

@@ -69,15 +70,15 @@ public class ZenPingTests extends ESTestCase {
        Collections.shuffle(pings, random());

        ZenPing.PingCollection collection = new ZenPing.PingCollection();
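        // the collection is expected to retain only the most recent ping (highest ping id) per node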
        collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()]));
        collection.addPings(pings);

        ZenPing.PingResponse[] aggregate = collection.toArray();
        List<ZenPing.PingResponse> aggregate = collection.toList();

        for (ZenPing.PingResponse ping : aggregate) {
            int nodeId = Integer.parseInt(ping.node().getId());
            assertThat(maxIdPerNode[nodeId], equalTo(ping.id()));
            assertThat(masterPerNode[nodeId], equalTo(ping.master()));
            assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce()));
            assertThat(clusterStateVersionPerNode[nodeId], equalTo(ping.getClusterStateVersion()));

            maxIdPerNode[nodeId] = -1; // mark as seen
        }

@@ -20,6 +20,9 @@
package org.elasticsearch.discovery.zen.ping.unicast;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

@@ -31,7 +34,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;

@@ -45,16 +48,18 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;

import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

public class UnicastZenPingIT extends ESTestCase {
public class UnicastZenPingTests extends ESTestCase {
    public void testSimplePings() throws InterruptedException {
        int startPort = 11000 + randomIntBetween(0, 1000);
        int endPort = startPort + 10;

@@ -78,6 +83,8 @@ public class UnicastZenPingIT extends ESTestCase {
        Version versionD = VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion);
        NetworkHandle handleD = startServices(settingsMismatch, threadPool, networkService, "UZP_D", versionD);

        final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomPositiveLong()).build();

        Settings hostsSettings = Settings.builder()
            .putArray("discovery.zen.ping.unicast.hosts",
                NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())),

@@ -96,8 +103,8 @@ public class UnicastZenPingIT extends ESTestCase {
            }

            @Override
            public boolean nodeHasJoinedClusterOnce() {
                return false;
            public ClusterState clusterState() {
                return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
            }
        });
        zenPingA.start();

@@ -110,8 +117,8 @@ public class UnicastZenPingIT extends ESTestCase {
            }

            @Override
            public boolean nodeHasJoinedClusterOnce() {
                return true;
            public ClusterState clusterState() {
                return state;
            }
        });
        zenPingB.start();

@@ -130,8 +137,8 @@ public class UnicastZenPingIT extends ESTestCase {
            }

            @Override
            public boolean nodeHasJoinedClusterOnce() {
                return false;
            public ClusterState clusterState() {
                return state;
            }
        });
        zenPingC.start();

@@ -144,36 +151,38 @@ public class UnicastZenPingIT extends ESTestCase {
            }

            @Override
            public boolean nodeHasJoinedClusterOnce() {
                return false;
            public ClusterState clusterState() {
                return state;
            }
        });
        zenPingD.start();

        try {
            logger.info("ping from UZP_A");
            ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
            assertThat(pingResponses.length, equalTo(1));
            assertThat(pingResponses[0].node().getId(), equalTo("UZP_B"));
            assertTrue(pingResponses[0].hasJoinedOnce());
            Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
            assertThat(pingResponses.size(), equalTo(1));
            ZenPing.PingResponse ping = pingResponses.iterator().next();
            assertThat(ping.node().getId(), equalTo("UZP_B"));
            assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
            assertCounters(handleA, handleA, handleB, handleC, handleD);

            // ping again, this time from B,
            logger.info("ping from UZP_B");
            pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1));
            assertThat(pingResponses.length, equalTo(1));
            assertThat(pingResponses[0].node().getId(), equalTo("UZP_A"));
            assertFalse(pingResponses[0].hasJoinedOnce());
            assertThat(pingResponses.size(), equalTo(1));
            ping = pingResponses.iterator().next();
            assertThat(ping.node().getId(), equalTo("UZP_A"));
            assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));
            assertCounters(handleB, handleA, handleB, handleC, handleD);

            logger.info("ping from UZP_C");
            pingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(1));
            assertThat(pingResponses.length, equalTo(0));
            assertThat(pingResponses.size(), equalTo(0));
            assertCounters(handleC, handleA, handleB, handleC, handleD);

            logger.info("ping from UZP_D");
            pingResponses = zenPingD.pingAndWait(TimeValue.timeValueSeconds(1));
            assertThat(pingResponses.length, equalTo(0));
            assertThat(pingResponses.size(), equalTo(0));
            assertCounters(handleD, handleA, handleB, handleC, handleD);
        } finally {
            zenPingA.close();

@@ -201,7 +210,8 @@ public class UnicastZenPingIT extends ESTestCase {
            Version version) {
        MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
            new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version);
        final TransportService transportService = new TransportService(settings, transport, threadPool);
        final TransportService transportService = new TransportService(settings, transport, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        transportService.start();
        transportService.acceptIncomingRequests();
        ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
@@ -23,13 +23,19 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.action.fieldstats.FieldStatsResponse;
import org.elasticsearch.action.fieldstats.IndexConstraint;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;

@@ -513,4 +519,52 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
        assertThat(response.getAllFieldStats().get("_type").isSearchable(), equalTo(true));
        assertThat(response.getAllFieldStats().get("_type").isAggregatable(), equalTo(true));
    }

    public void testSerialization() throws IOException {
        for (int i = 0; i < 20; i++) {
            assertSerialization(randomFieldStats());
        }
    }

    /**
     * creates a random field stats which does not guarantee that {@link FieldStats#maxValue} is greater than {@link FieldStats#minValue}
     **/
    private FieldStats randomFieldStats() throws UnknownHostException {
        int type = randomInt(5);
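        // type codes: 0 = Long, 1 = Double, 2 = Date, 3 = Text, 4 = Ip (IPv6), 5 = Ip (IPv4)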
        switch (type) {
            case 0:
                return new FieldStats.Long(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(), randomLong(), randomLong());
            case 1:
                return new FieldStats.Double(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(), randomDouble(), randomDouble());
            case 2:
                return new FieldStats.Date(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(), Joda.forPattern("basicDate"),
                    new Date().getTime(), new Date().getTime());
            case 3:
                return new FieldStats.Text(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(),
                    new BytesRef(randomAsciiOfLength(10)), new BytesRef(randomAsciiOfLength(20)));
            case 4:
                return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(),
                    InetAddress.getByName("::1"), InetAddress.getByName("::1"));
            case 5:
                return new FieldStats.Ip(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                    randomPositiveLong(), randomBoolean(), randomBoolean(),
                    InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.4"));
            default:
                throw new IllegalArgumentException("Invalid type");
        }
    }

    private void assertSerialization(FieldStats stats) throws IOException {
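        // round-trips the stats through the stream output/input and checks that equals and hashCode survive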
        BytesStreamOutput output = new BytesStreamOutput();
        stats.writeTo(output);
        output.flush();
        FieldStats deserializedStats = FieldStats.readFrom(output.bytes().streamInput());
        assertThat(stats, equalTo(deserializedStats));
        assertThat(stats.hashCode(), equalTo(deserializedStats.hashCode()));
    }
}

@@ -38,7 +38,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.mapper.MapperParsingException;

@@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

@@ -77,7 +77,8 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase {
        clusterService = createClusterService(THREAD_POOL);
        transport = new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(Collections.emptyList()),
            new NoneCircuitBreakerService());
        transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
        transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        indicesService = getInstanceFromNode(IndicesService.class);
        shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL);
        actionFilters = new ActionFilters(Collections.emptySet());

@@ -365,12 +365,22 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
     * test that two queries in object throws error
     */
    public void testTooManyQueriesInObject() throws IOException {
        String clauseType = randomFrom(new String[] {"must", "should", "must_not", "filter"});
        String clauseType = randomFrom("must", "should", "must_not", "filter");
        // should also throw error if invalid query is preceded by a valid one
        String query = "{\"bool\" : {\"" + clauseType
            + "\" : { \"match\" : { \"foo\" : \"bar\" } , \"match\" : { \"baz\" : \"buzz\" } } } }";
        String query = "{\n" +
            "  \"bool\": {\n" +
            "    \"" + clauseType + "\": {\n" +
            "      \"match\": {\n" +
            "        \"foo\": \"bar\"\n" +
            "      },\n" +
            "      \"match\": {\n" +
            "        \"baz\": \"buzz\"\n" +
            "      }\n" +
            "    }\n" +
            "  }\n" +
            "}";
        ParsingException ex = expectThrows(ParsingException.class, () -> parseQuery(query, ParseFieldMatcher.EMPTY));
        assertEquals("expected [END_OBJECT] but got [FIELD_NAME], possibly too many query clauses", ex.getMessage());
        assertEquals("[match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", ex.getMessage());
    }

    public void testRewrite() throws IOException {

@@ -700,7 +700,7 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase<Functi
        expectParsingException(json, "field [not_supported] is not supported");
    }

    public void testMalformedQuery() throws IOException {
    public void testMalformedQueryMultipleQueryObjects() throws IOException {
        //verify that an error is thrown rather than setting the query twice (https://github.com/elastic/elasticsearch/issues/16583)
        String json = "{\n" +
            "  \"function_score\":{\n" +

@@ -715,15 +715,34 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase<Functi
            "    }\n" +
            "  }\n" +
            "}";
        expectParsingException(json, equalTo("[bool] malformed query, expected [END_OBJECT] but found [FIELD_NAME]"));
    }

    public void testMalformedQueryMultipleQueryElements() throws IOException {
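        // defining the [query] element twice within the same function_score must fail while parsing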
        String json = "{\n" +
            "  \"function_score\":{\n" +
            "    \"query\":{\n" +
            "      \"bool\":{\n" +
            "        \"must\":{\"match\":{\"field\":\"value\"}}" +
            "      }\n" +
            "    },\n" +
            "    \"query\":{\n" +
            "      \"bool\":{\n" +
            "        \"must\":{\"match\":{\"field\":\"value\"}}" +
            "      }\n" +
            "    }\n" +
            "  }\n" +
            " }\n" +
            "}";
        expectParsingException(json, "[query] is already defined.");
    }

    private void expectParsingException(String json, Matcher<String> messageMatcher) {
    private static void expectParsingException(String json, Matcher<String> messageMatcher) {
        ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
        assertThat(e.getMessage(), messageMatcher);
    }

    private void expectParsingException(String json, String message) {
    private static void expectParsingException(String json, String message) {
        expectParsingException(json, equalTo("failed to parse [function_score] query. " + message));
    }

@@ -150,7 +150,8 @@ public class ClusterStateChanges extends AbstractComponent {
        }

        // services
        TransportService transportService = new TransportService(settings, transport, threadPool);
        TransportService transportService = new TransportService(settings, transport, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) {
            // metaData upgrader should do nothing
            @Override

@@ -336,7 +336,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
        final Executor executor = mock(Executor.class);
        when(threadPool.generic()).thenReturn(executor);
        final MockIndicesService indicesService = indicesServiceSupplier.get();
        final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool);
        final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        final ClusterService clusterService = mock(ClusterService.class);
        final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService,
            transportService, null);

@@ -88,7 +88,7 @@ public class IndicesStoreTests extends ESTestCase {
    public void before() {
        localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT);
        clusterService = createClusterService(threadPool);
        indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null), null);
        indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null);
    }

    @After

@@ -30,8 +30,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.RestRequest.Method;
import org.elasticsearch.rest.action.RestMainAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;

@@ -45,12 +43,13 @@ public class RestMainActionTests extends ESTestCase {
    public void testHeadResponse() throws Exception {
        final String nodeName = "node1";
        final ClusterName clusterName = new ClusterName("cluster1");
        final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
        final boolean available = randomBoolean();
        final RestStatus expectedStatus = available ? RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE;
        final Version version = Version.CURRENT;
        final Build build = Build.CURRENT;

        final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
        final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
        XContentBuilder builder = JsonXContent.contentBuilder();
        RestRequest restRequest = new FakeRestRequest() {
            @Override

@@ -70,13 +69,14 @@ public class RestMainActionTests extends ESTestCase {
    public void testGetResponse() throws Exception {
        final String nodeName = "node1";
        final ClusterName clusterName = new ClusterName("cluster1");
        final String clusterUUID = randomAsciiOfLengthBetween(10, 20);
        final boolean available = randomBoolean();
        final RestStatus expectedStatus = available ? RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE;
        final Version version = Version.CURRENT;
        final Build build = Build.CURRENT;
        final boolean prettyPrint = randomBoolean();

        final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, build, available);
        final MainResponse mainResponse = new MainResponse(nodeName, version, clusterName, clusterUUID, build, available);
        XContentBuilder builder = JsonXContent.contentBuilder();

        Map<String, String> params = new HashMap<>();

@@ -0,0 +1,512 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests;
import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilderTests;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;

public abstract class AbstractSearchTestCase extends ESTestCase {

    protected NamedWriteableRegistry namedWriteableRegistry;
    protected SearchRequestParsers searchRequestParsers;
    private TestSearchExtPlugin searchExtPlugin;

    public void setUp() throws Exception {
        super.setUp();
        IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
        searchExtPlugin = new TestSearchExtPlugin();
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.singletonList(searchExtPlugin));
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        namedWriteableRegistry = new NamedWriteableRegistry(entries);
        searchRequestParsers = searchModule.getSearchRequestParsers();
    }

    protected SearchSourceBuilder createSearchSourceBuilder() throws IOException {
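        // builds a SearchSourceBuilder with a random subset of its options set; used for serialization/parsing round-trips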
        SearchSourceBuilder builder = new SearchSourceBuilder();
        if (randomBoolean()) {
            builder.from(randomIntBetween(0, 10000));
        }
        if (randomBoolean()) {
            builder.size(randomIntBetween(0, 10000));
        }
        if (randomBoolean()) {
            builder.explain(randomBoolean());
        }
        if (randomBoolean()) {
            builder.version(randomBoolean());
        }
        if (randomBoolean()) {
            builder.trackScores(randomBoolean());
        }
        if (randomBoolean()) {
            builder.minScore(randomFloat() * 1000);
        }
        if (randomBoolean()) {
            builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout"));
        }
        if (randomBoolean()) {
            builder.terminateAfter(randomIntBetween(1, 100000));
        }

        switch(randomInt(2)) {
            case 0:
                builder.storedFields();
                break;
            case 1:
                builder.storedField("_none_");
                break;
            case 2:
                int fieldsSize = randomInt(25);
                List<String> fields = new ArrayList<>(fieldsSize);
                for (int i = 0; i < fieldsSize; i++) {
                    fields.add(randomAsciiOfLengthBetween(5, 50));
                }
                builder.storedFields(fields);
                break;
            default:
                throw new IllegalStateException();
        }

        if (randomBoolean()) {
            int scriptFieldsSize = randomInt(25);
            for (int i = 0; i < scriptFieldsSize; i++) {
                if (randomBoolean()) {
                    builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean());
                } else {
                    builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"));
                }
            }
        }
        if (randomBoolean()) {
            FetchSourceContext fetchSourceContext;
            int branch = randomInt(5);
            String[] includes = new String[randomIntBetween(0, 20)];
            for (int i = 0; i < includes.length; i++) {
                includes[i] = randomAsciiOfLengthBetween(5, 20);
            }
            String[] excludes = new String[randomIntBetween(0, 20)];
            for (int i = 0; i < excludes.length; i++) {
                excludes[i] = randomAsciiOfLengthBetween(5, 20);
            }
            switch (branch) {
                case 0:
                    fetchSourceContext = new FetchSourceContext(randomBoolean());
                    break;
                case 1:
                    fetchSourceContext = new FetchSourceContext(includes, excludes);
                    break;
                case 2:
                    fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20));
                    break;
                case 3:
                    fetchSourceContext = new FetchSourceContext(true, includes, excludes);
                    break;
                case 4:
                    fetchSourceContext = new FetchSourceContext(includes);
                    break;
                case 5:
                    fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20));
                    break;
                default:
                    throw new IllegalStateException();
            }
            builder.fetchSource(fetchSourceContext);
        }
        if (randomBoolean()) {
            int size = randomIntBetween(0, 20);
            List<String> statsGroups = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                statsGroups.add(randomAsciiOfLengthBetween(5, 20));
            }
            builder.stats(statsGroups);
        }
        if (randomBoolean()) {
            int indexBoostSize = randomIntBetween(1, 10);
            for (int i = 0; i < indexBoostSize; i++) {
                builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10);
            }
        }
        if (randomBoolean()) {
            builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            int numSorts = randomIntBetween(1, 5);
            for (int i = 0; i < numSorts; i++) {
                int branch = randomInt(5);
                switch (branch) {
                    case 0:
                        builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
                        break;
                    case 1:
                        builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20),
                            AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
                        break;
                    case 2:
                        builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));
                        break;
                    case 3:
                        builder.sort(SortBuilders.scriptSort(new Script("foo"),
                            ScriptSortBuilder.ScriptSortType.NUMBER).order(randomFrom(SortOrder.values())));
                        break;
                    case 4:
                        builder.sort(randomAsciiOfLengthBetween(5, 20));
                        break;
                    case 5:
                        builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values()));
                        break;
                }
            }
        }

        if (randomBoolean()) {
            int numSearchFrom = randomIntBetween(1, 5);
            // We build a json version of the search_from first in order to
            // ensure that every number type remains the same before/after xcontent (de)serialization.
            // This is not a problem because the final type of each field value is extracted from the associated sort field.
            // This little trick ensures that equals and hashcode are the same when using the xcontent serialization.
            XContentBuilder jsonBuilder = XContentFactory.jsonBuilder();
            jsonBuilder.startObject();
            jsonBuilder.startArray("search_from");
            for (int i = 0; i < numSearchFrom; i++) {
                int branch = randomInt(8);
                switch (branch) {
                    case 0:
                        jsonBuilder.value(randomInt());
                        break;
                    case 1:
                        jsonBuilder.value(randomFloat());
                        break;
                    case 2:
                        jsonBuilder.value(randomLong());
                        break;
                    case 3:
                        jsonBuilder.value(randomDouble());
                        break;
                    case 4:
                        jsonBuilder.value(randomAsciiOfLengthBetween(5, 20));
                        break;
                    case 5:
                        jsonBuilder.value(randomBoolean());
                        break;
                    case 6:
                        jsonBuilder.value(randomByte());
                        break;
                    case 7:
                        jsonBuilder.value(randomShort());
                        break;
                    case 8:
                        jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20)));
                        break;
                }
            }
            jsonBuilder.endArray();
            jsonBuilder.endObject();
            XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes());
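            // step past the enclosing START_OBJECT, the "search_from" FIELD_NAME and the START_ARRAY token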
            parser.nextToken();
            parser.nextToken();
            parser.nextToken();
            builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues());
        }
        if (randomBoolean()) {
            builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder());
        }
        if (randomBoolean()) {
            builder.suggest(SuggestBuilderTests.randomSuggestBuilder());
        }
        if (randomBoolean()) {
            int numRescores = randomIntBetween(1, 5);
            for (int i = 0; i < numRescores; i++) {
                builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder());
            }
        }
        if (randomBoolean()) {
            builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            Set<String> elementNames = new HashSet<>(searchExtPlugin.getSupportedElements().keySet());
            int numSearchExts = randomIntBetween(1, elementNames.size());
            while(elementNames.size() > numSearchExts) {
                elementNames.remove(randomFrom(elementNames));
            }
            List<SearchExtBuilder> searchExtBuilders = new ArrayList<>();
            for (String elementName : elementNames) {
                searchExtBuilders.add(searchExtPlugin.getSupportedElements().get(elementName).apply(randomAsciiOfLengthBetween(3, 10)));
            }
            builder.ext(searchExtBuilders);
        }
        if (randomBoolean()) {
            String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20);
            int max = between(2, 1000);
            int id = randomInt(max-1);
            if (field == null) {
                builder.slice(new SliceBuilder(id, max));
            } else {
                builder.slice(new SliceBuilder(field, id, max));
            }
        }
        return builder;
    }

    protected SearchRequest createSearchRequest() throws IOException {
        SearchRequest searchRequest = new SearchRequest();
        if (randomBoolean()) {
            searchRequest.indices(generateRandomStringArray(10, 10, false, false));
        }
        if (randomBoolean()) {
            searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
        }
        if (randomBoolean()) {
            searchRequest.types(generateRandomStringArray(10, 10, false, false));
        }
        if (randomBoolean()) {
            searchRequest.preference(randomAsciiOfLengthBetween(3, 10));
        }
        if (randomBoolean()) {
            searchRequest.requestCache(randomBoolean());
        }
        if (randomBoolean()) {
            searchRequest.routing(randomAsciiOfLengthBetween(3, 10));
        }
        if (randomBoolean()) {
            searchRequest.scroll(randomPositiveTimeValue());
        }
        if (randomBoolean()) {
            searchRequest.searchType(randomFrom(SearchType.values()));
        }
        if (randomBoolean()) {
            searchRequest.source(createSearchSourceBuilder());
        }
        return searchRequest;
    }

    private static class TestSearchExtPlugin extends Plugin implements SearchPlugin {
        private final List<SearchExtSpec<? extends SearchExtBuilder>> searchExtSpecs;
        private final Map<String, Function<String, ? extends SearchExtBuilder>> supportedElements;

        private TestSearchExtPlugin() {
            int numSearchExts = randomIntBetween(1, 3);
            this.searchExtSpecs = new ArrayList<>(numSearchExts);
            this.supportedElements = new HashMap<>();
            for (int i = 0; i < numSearchExts; i++) {
                switch (randomIntBetween(0, 2)) {
                    case 0:
                        if (this.supportedElements.put(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new) == null) {
                            this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder1.NAME, TestSearchExtBuilder1::new,
                                new TestSearchExtParser<>(TestSearchExtBuilder1::new)));
                        }
                        break;
                    case 1:
                        if (this.supportedElements.put(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new) == null) {
                            this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder2.NAME, TestSearchExtBuilder2::new,
                                new TestSearchExtParser<>(TestSearchExtBuilder2::new)));
                        }
                        break;
                    case 2:
                        if (this.supportedElements.put(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new) == null) {
                            this.searchExtSpecs.add(new SearchExtSpec<>(TestSearchExtBuilder3.NAME, TestSearchExtBuilder3::new,
                                new TestSearchExtParser<>(TestSearchExtBuilder3::new)));
                        }
                        break;
                    default:
                        throw new UnsupportedOperationException();
                }
            }
        }

        Map<String, Function<String, ? extends SearchExtBuilder>> getSupportedElements() {
            return supportedElements;
        }

        @Override
        public List<SearchExtSpec<?>> getSearchExts() {
            return searchExtSpecs;
        }
    }

    private static class TestSearchExtParser<T extends SearchExtBuilder> implements SearchExtParser<T> {
        private final Function<String, T> searchExtBuilderFunction;

        TestSearchExtParser(Function<String, T> searchExtBuilderFunction) {
            this.searchExtBuilderFunction = searchExtBuilderFunction;
        }

        @Override
        public T fromXContent(XContentParser parser) throws IOException {
            return searchExtBuilderFunction.apply(parseField(parser));
        }

        String parseField(XContentParser parser) throws IOException {
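            // expects an element of the form { "<field>" : { } } and returns the field name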
            if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken());
            }
            if (parser.nextToken() != XContentParser.Token.FIELD_NAME) {
                throw new ParsingException(parser.getTokenLocation(), "field_name expected, found " + parser.currentToken());
            }
            String field = parser.currentName();
            if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "start_object expected, found " + parser.currentToken());
            }
            if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken());
            }
            if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "end_object expected, found " + parser.currentToken());
            }
            return field;
        }
    }

    // Would be nice to have a single builder that gets its name as a parameter, but the name wouldn't get a value when the object
    // is created reading from the stream (constructor that takes a StreamInput) which is a problem as we check that after reading
    // a named writeable its name is the expected one. That's why we go for the following less dynamic approach.
    private static class TestSearchExtBuilder1 extends TestSearchExtBuilder {
        private static final String NAME = "name1";

        TestSearchExtBuilder1(String field) {
            super(NAME, field);
        }

        TestSearchExtBuilder1(StreamInput in) throws IOException {
            super(NAME, in);
        }
    }

    private static class TestSearchExtBuilder2 extends TestSearchExtBuilder {
        private static final String NAME = "name2";

        TestSearchExtBuilder2(String field) {
            super(NAME, field);
        }

        TestSearchExtBuilder2(StreamInput in) throws IOException {
            super(NAME, in);
        }
    }

    private static class TestSearchExtBuilder3 extends TestSearchExtBuilder {
        private static final String NAME = "name3";

        TestSearchExtBuilder3(String field) {
            super(NAME, field);
        }

        TestSearchExtBuilder3(StreamInput in) throws IOException {
            super(NAME, in);
        }
    }

    private abstract static class TestSearchExtBuilder extends SearchExtBuilder {
        final String objectName;
        protected final String name;

        TestSearchExtBuilder(String name, String objectName) {
            this.name = name;
            this.objectName = objectName;
        }

        TestSearchExtBuilder(String name, StreamInput in) throws IOException {
            this.name = name;
            this.objectName = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(objectName);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            TestSearchExtBuilder that = (TestSearchExtBuilder) o;
            return Objects.equals(objectName, that.objectName) &&
                Objects.equals(name, that.name);
        }

        @Override
        public int hashCode() {
            return Objects.hash(objectName, name);
        }

        @Override
        public String getWriteableName() {
            return name;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject(name);
            builder.startObject(objectName);
            builder.endObject();
            builder.endObject();
            return builder;
        }
    }
}

@@ -24,49 +24,13 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.search.fetch.FetchSubPhasePluginIT;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static java.util.Collections.emptyList;
import static org.elasticsearch.search.builder.SearchSourceBuilderTests.createSearchSourceBuilder;

public class SearchRequestTests extends ESTestCase {

    private static NamedWriteableRegistry namedWriteableRegistry;

    @BeforeClass
    public static void beforeClass() {
        IndicesModule indicesModule = new IndicesModule(emptyList()) {
            @Override
            protected void configure() {
                bindMapperExtension();
            }
        };
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
            Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        namedWriteableRegistry = new NamedWriteableRegistry(entries);
    }

    @AfterClass
    public static void afterClass() {
        namedWriteableRegistry = null;
    }
public class SearchRequestTests extends AbstractSearchTestCase {

    public void testSerialization() throws Exception {
        SearchRequest searchRequest = createSearchRequest();

@@ -204,38 +168,6 @@ public class SearchRequestTests extends ESTestCase {
        }
    }

    public static SearchRequest createSearchRequest() throws IOException {
        SearchRequest searchRequest = new SearchRequest();
        if (randomBoolean()) {
            searchRequest.indices(generateRandomStringArray(10, 10, false, false));
        }
        if (randomBoolean()) {
            searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
        }
        if (randomBoolean()) {
            searchRequest.types(generateRandomStringArray(10, 10, false, false));
        }
        if (randomBoolean()) {
            searchRequest.preference(randomAsciiOfLengthBetween(3, 10));
        }
        if (randomBoolean()) {
            searchRequest.requestCache(randomBoolean());
        }
        if (randomBoolean()) {
            searchRequest.routing(randomAsciiOfLengthBetween(3, 10));
        }
        if (randomBoolean()) {
            searchRequest.scroll(randomPositiveTimeValue());
        }
        if (randomBoolean()) {
            searchRequest.searchType(randomFrom(SearchType.values()));
        }
        if (randomBoolean()) {
            searchRequest.source(createSearchSourceBuilder());
        }
        return searchRequest;
    }

    private static SearchRequest copyRequest(SearchRequest searchRequest) throws IOException {
        SearchRequest result = new SearchRequest();
        result.indices(searchRequest.indices());

@ -21,301 +21,33 @@ package org.elasticsearch.search.builder;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.fetch.FetchSubPhasePluginIT;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests;
import org.elasticsearch.search.rescore.QueryRescoreBuilderTests;
import org.elasticsearch.search.AbstractSearchTestCase;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilderTests;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;

public class SearchSourceBuilderTests extends ESTestCase {

    private NamedWriteableRegistry namedWriteableRegistry;

    private SearchRequestParsers searchRequestParsers;

    private ParseFieldMatcher parseFieldMatcher;

    public void setUp() throws Exception {
        super.setUp();
        // we have to prefer CURRENT since with the range of versions we support
        // it's rather unlikely to get the current actually.
        Settings settings = Settings.builder()
                .put("node.name", AbstractQueryTestCase.class.toString())
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
        IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
        SearchModule searchModule = new SearchModule(settings, false,
                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        namedWriteableRegistry = new NamedWriteableRegistry(entries);
        searchRequestParsers = searchModule.getSearchRequestParsers();
        parseFieldMatcher = ParseFieldMatcher.STRICT;
    }

    public static SearchSourceBuilder createSearchSourceBuilder() throws IOException {
        SearchSourceBuilder builder = new SearchSourceBuilder();
        if (randomBoolean()) {
            builder.from(randomIntBetween(0, 10000));
        }
        if (randomBoolean()) {
            builder.size(randomIntBetween(0, 10000));
        }
        if (randomBoolean()) {
            builder.explain(randomBoolean());
        }
        if (randomBoolean()) {
            builder.version(randomBoolean());
        }
        if (randomBoolean()) {
            builder.trackScores(randomBoolean());
        }
        if (randomBoolean()) {
            builder.minScore(randomFloat() * 1000);
        }
        if (randomBoolean()) {
            builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout"));
        }
        if (randomBoolean()) {
            builder.terminateAfter(randomIntBetween(1, 100000));
        }
        // if (randomBoolean()) {
        //     builder.defaultRescoreWindowSize(randomIntBetween(1, 100));
        // }

        switch (randomInt(2)) {
            case 0:
                builder.storedFields();
                break;
            case 1:
                builder.storedField("_none_");
                break;
            case 2:
                int fieldsSize = randomInt(25);
                List<String> fields = new ArrayList<>(fieldsSize);
                for (int i = 0; i < fieldsSize; i++) {
                    fields.add(randomAsciiOfLengthBetween(5, 50));
                }
                builder.storedFields(fields);
                break;
            default:
                throw new IllegalStateException();
        }

        if (randomBoolean()) {
            int scriptFieldsSize = randomInt(25);
            for (int i = 0; i < scriptFieldsSize; i++) {
                if (randomBoolean()) {
                    builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean());
                } else {
                    builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"));
                }
            }
        }
        if (randomBoolean()) {
            FetchSourceContext fetchSourceContext;
            int branch = randomInt(5);
            String[] includes = new String[randomIntBetween(0, 20)];
            for (int i = 0; i < includes.length; i++) {
                includes[i] = randomAsciiOfLengthBetween(5, 20);
            }
            String[] excludes = new String[randomIntBetween(0, 20)];
            for (int i = 0; i < excludes.length; i++) {
                excludes[i] = randomAsciiOfLengthBetween(5, 20);
            }
            switch (branch) {
                case 0:
                    fetchSourceContext = new FetchSourceContext(randomBoolean());
                    break;
                case 1:
                    fetchSourceContext = new FetchSourceContext(includes, excludes);
                    break;
                case 2:
                    fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20));
                    break;
                case 3:
                    fetchSourceContext = new FetchSourceContext(true, includes, excludes);
                    break;
                case 4:
                    fetchSourceContext = new FetchSourceContext(includes);
                    break;
                case 5:
                    fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20));
                    break;
                default:
                    throw new IllegalStateException();
            }
            builder.fetchSource(fetchSourceContext);
        }
        if (randomBoolean()) {
            int size = randomIntBetween(0, 20);
            List<String> statsGroups = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                statsGroups.add(randomAsciiOfLengthBetween(5, 20));
            }
            builder.stats(statsGroups);
        }
        if (randomBoolean()) {
            int indexBoostSize = randomIntBetween(1, 10);
            for (int i = 0; i < indexBoostSize; i++) {
                builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10);
            }
        }
        if (randomBoolean()) {
            builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            int numSorts = randomIntBetween(1, 5);
            for (int i = 0; i < numSorts; i++) {
                int branch = randomInt(5);
                switch (branch) {
                    case 0:
                        builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
                        break;
                    case 1:
                        builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20),
                                AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
                        break;
                    case 2:
                        builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));
                        break;
                    case 3:
                        builder.sort(SortBuilders.scriptSort(new Script("foo"),
                                ScriptSortType.NUMBER).order(randomFrom(SortOrder.values())));
                        break;
                    case 4:
                        builder.sort(randomAsciiOfLengthBetween(5, 20));
                        break;
                    case 5:
                        builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values()));
                        break;
                }
            }
        }

        if (randomBoolean()) {
            int numSearchFrom = randomIntBetween(1, 5);
            // We build a json version of the search_from first in order to
            // ensure that every number type remains the same before/after xcontent (de)serialization.
            // This is not a problem because the final type of each field value is extracted from the associated sort field.
            // This little trick ensures that equals and hashcode are the same when using the xcontent serialization.
            XContentBuilder jsonBuilder = XContentFactory.jsonBuilder();
            jsonBuilder.startObject();
            jsonBuilder.startArray("search_from");
            for (int i = 0; i < numSearchFrom; i++) {
                int branch = randomInt(8);
                switch (branch) {
                    case 0:
                        jsonBuilder.value(randomInt());
                        break;
                    case 1:
                        jsonBuilder.value(randomFloat());
                        break;
                    case 2:
                        jsonBuilder.value(randomLong());
                        break;
                    case 3:
                        jsonBuilder.value(randomDouble());
                        break;
                    case 4:
                        jsonBuilder.value(randomAsciiOfLengthBetween(5, 20));
                        break;
                    case 5:
                        jsonBuilder.value(randomBoolean());
                        break;
                    case 6:
                        jsonBuilder.value(randomByte());
                        break;
                    case 7:
                        jsonBuilder.value(randomShort());
                        break;
                    case 8:
                        jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20)));
                        break;
                }
            }
            jsonBuilder.endArray();
            jsonBuilder.endObject();
            XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes());
            parser.nextToken();
            parser.nextToken();
            parser.nextToken();
            builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues());
        }
        if (randomBoolean()) {
            builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder());
        }
        if (randomBoolean()) {
            builder.suggest(SuggestBuilderTests.randomSuggestBuilder());
        }
        if (randomBoolean()) {
            int numRescores = randomIntBetween(1, 5);
            for (int i = 0; i < numRescores; i++) {
                builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder());
            }
        }
        if (randomBoolean()) {
            builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20)));
        }
        if (randomBoolean()) {
            builder.ext(Collections.singletonList(new FetchSubPhasePluginIT.TermVectorsFetchBuilder("test")));
        }
        if (randomBoolean()) {
            String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20);
            int max = between(2, 1000);
            int id = randomInt(max - 1);
            if (field == null) {
                builder.slice(new SliceBuilder(id, max));
            } else {
                builder.slice(new SliceBuilder(field, id, max));
            }
        }
        return builder;
    }
public class SearchSourceBuilderTests extends AbstractSearchTestCase {

    public void testFromXContent() throws IOException {
        SearchSourceBuilder testSearchSourceBuilder = createSearchSourceBuilder();

@ -347,7 +79,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
    }

    private QueryParseContext createParseContext(XContentParser parser) {
        return new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher);
        return new QueryParseContext(searchRequestParsers.queryParsers, parser, ParseFieldMatcher.STRICT);
    }

    public void testSerialization() throws IOException {

@ -391,7 +123,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
    }

    // we use the streaming infra to create a copy of the builder provided as argument
    protected SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException {
    private SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            builder.writeTo(output);
            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {

@ -422,6 +154,26 @@ public class SearchSourceBuilderTests extends ESTestCase {
        }
    }

    public void testMultipleQueryObjectsAreRejected() throws Exception {
        String restContent =
                " { \"query\": {\n" +
                " \"multi_match\": {\n" +
                " \"query\": \"workd\",\n" +
                " \"fields\": [\"title^5\", \"plain_body\"]\n" +
                " },\n" +
                " \"filters\": {\n" +
                " \"terms\": {\n" +
                " \"status\": [ 3 ]\n" +
                " }\n" +
                " }\n" +
                " } }";
        try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) {
            ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(createParseContext(parser),
                    searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers));
            assertEquals("[multi_match] malformed query, expected [END_OBJECT] but found [FIELD_NAME]", e.getMessage());
        }
    }

    public void testParseSort() throws IOException {
        {
            String restContent = " { \"sort\": \"foo\"}";

@ -574,25 +326,4 @@ public class SearchSourceBuilderTests extends ESTestCase {
        String query = "{ \"query\": {} }";
        assertParseSearchSource(builder, new BytesArray(query), ParseFieldMatcher.EMPTY);
    }

    public void testSearchRequestBuilderSerializationWithIndexBoost() throws Exception {
        SearchSourceBuilder searchSourceBuilder = createSearchSourceBuilder();
        createIndexBoost(searchSourceBuilder);
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            searchSourceBuilder.writeTo(output);
            try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
                SearchSourceBuilder deserializedSearchSourceBuilder = new SearchSourceBuilder(in);
                BytesStreamOutput deserializedOutput = new BytesStreamOutput();
                deserializedSearchSourceBuilder.writeTo(deserializedOutput);
                assertEquals(output.bytes(), deserializedOutput.bytes());
            }
        }
    }

    private void createIndexBoost(SearchSourceBuilder searchSourceBuilder) {
        int indexBoostSize = randomIntBetween(1, 10);
        for (int i = 0; i < indexBoostSize; i++) {
            searchSourceBuilder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10);
        }
    }
}

@ -118,7 +118,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
        }
    }

    public static final class TermVectorsFetchSubPhase implements FetchSubPhase {
    private static final class TermVectorsFetchSubPhase implements FetchSubPhase {
        private static final String NAME = "term_vectors_fetch";

        @Override

@ -153,7 +153,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
        }
    }

    public static final class TermVectorsFetchParser implements SearchExtParser<TermVectorsFetchBuilder> {
    private static final class TermVectorsFetchParser implements SearchExtParser<TermVectorsFetchBuilder> {

        private static final TermVectorsFetchParser INSTANCE = new TermVectorsFetchParser();

@ -176,18 +176,18 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
        }
    }

    public static final class TermVectorsFetchBuilder extends SearchExtBuilder {
    private static final class TermVectorsFetchBuilder extends SearchExtBuilder {
        private final String field;

        public TermVectorsFetchBuilder(String field) {
        private TermVectorsFetchBuilder(String field) {
            this.field = field;
        }

        public TermVectorsFetchBuilder(StreamInput in) throws IOException {
        private TermVectorsFetchBuilder(StreamInput in) throws IOException {
            this.field = in.readString();
        }

        public String getField() {
        private String getField() {
            return field;
        }

@ -27,49 +27,13 @@ import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchRequestTests;
import org.elasticsearch.search.fetch.FetchSubPhasePluginIT;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.elasticsearch.search.AbstractSearchTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static java.util.Collections.emptyList;

public class ShardSearchTransportRequestTests extends ESTestCase {

    private static NamedWriteableRegistry namedWriteableRegistry;

    @BeforeClass
    public static void beforeClass() {
        IndicesModule indicesModule = new IndicesModule(emptyList()) {
            @Override
            protected void configure() {
                bindMapperExtension();
            }
        };
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        namedWriteableRegistry = new NamedWriteableRegistry(entries);
    }

    @AfterClass
    public static void afterClass() {
        namedWriteableRegistry = null;
    }
public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {

    public void testSerialization() throws Exception {
        ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest();

@ -95,8 +59,8 @@ public class ShardSearchTransportRequestTests extends ESTestCase {
        }
    }

    private static ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException {
        SearchRequest searchRequest = SearchRequestTests.createSearchRequest();
    private ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException {
        SearchRequest searchRequest = createSearchRequest();
        ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt());
        ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED,
                new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason"));

@ -126,6 +126,7 @@ public class FieldSortIT extends ESIntegTestCase {
        List<IndexRequestBuilder> builders = new ArrayList<>();
        boolean strictTimeBasedIndices = randomBoolean();
        final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month
        int docs = 0;
        for (int i = 0; i < numIndices; i++) {
            final String indexId = strictTimeBasedIndices ? "idx_" + i : "idx";
            if (strictTimeBasedIndices || i == 0) {

@ -141,9 +142,10 @@ public class FieldSortIT extends ESIntegTestCase {
                        String.format(Locale.ROOT, "%02d", j+1) +
                        ":00:00"));
            }
            indexRandom(true, builders);
            docs += builders.size();
            builders.clear();
        }
        int docs = builders.size();
        indexRandom(true, builders);
        SearchResponse allDocsResponse = client().prepareSearch().setQuery(
                QueryBuilders.boolQuery().must(QueryBuilders.termQuery("foo", "bar")).must(
                        QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")))

@ -21,16 +21,16 @@ package org.elasticsearch.search.suggest;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;

import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.suggest.CompletionSuggestSearchIT.CompletionMappingBuilder;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping;

@ -40,7 +40,6 @@ import org.elasticsearch.search.suggest.completion.context.ContextMapping;
import org.elasticsearch.search.suggest.completion.context.GeoContextMapping;
import org.elasticsearch.search.suggest.completion.context.GeoQueryContext;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.ArrayList;

@ -54,6 +53,8 @@ import java.util.Map;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.core.IsEqual.equalTo;

@SuppressCodecs("*") // requires custom completion format
public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {

@ -160,6 +161,27 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
        assertSuggestions("foo", prefix, "sugxgestion9", "sugxgestion8", "sugxgestion7", "sugxgestion6", "sugxgestion5");
    }

    public void testContextFilteringWorksWithUTF8Categories() throws Exception {
        CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build();
        LinkedHashMap<String, ContextMapping> map = new LinkedHashMap<>(Collections.singletonMap("cat", contextMapping));
        final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map);
        createIndexAndMapping(mapping);
        IndexResponse indexResponse = client().prepareIndex(INDEX, TYPE, "1")
                .setSource(jsonBuilder().startObject()
                        .startObject(FIELD)
                        .field("input", "suggestion")
                        .endObject()
                        .field("cat", "ctx\\u00e4")
                        .endObject())
                .get();
        assertThat(indexResponse.status(), equalTo(RestStatus.CREATED));
        assertNoFailures(client().admin().indices().prepareRefresh(INDEX).get());
        CompletionSuggestionBuilder contextSuggestQuery = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg")
                .contexts(Collections.singletonMap("cat",
                        Collections.singletonList(CategoryQueryContext.builder().setCategory("ctx\\u00e4").build())));
        assertSuggestions("foo", contextSuggestQuery, "suggestion");
    }

    public void testSingleContextFiltering() throws Exception {
        CategoryContextMapping contextMapping = ContextBuilder.category("cat").field("cat").build();
        LinkedHashMap<String, ContextMapping> map = new LinkedHashMap<String, ContextMapping>(Collections.singletonMap("cat", contextMapping));

@ -44,8 +44,8 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.ttl.IndicesTTLService;

@ -1,51 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.transport;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.test.transport.AssertingLocalTransport;
import org.elasticsearch.threadpool.ThreadPool;

/** Unit tests for module registering custom transport and transport service */
public class TransportModuleTests extends ModuleTestCase {


    static class FakeTransport extends AssertingLocalTransport {
        @Inject
        public FakeTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool,
                             NamedWriteableRegistry namedWriteableRegistry) {
            super(settings, circuitBreakerService, threadPool, namedWriteableRegistry);
        }
    }

    static class FakeTransportService extends TransportService {
        @Inject
        public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
            super(settings, transport, threadPool);
        }
    }
}

@ -65,7 +65,8 @@ public class TransportServiceHandshakeTests extends ESTestCase {
            new NoneCircuitBreakerService(),
            new NamedWriteableRegistry(Collections.emptyList()),
            new NetworkService(settings, Collections.emptyList()));
        TransportService transportService = new MockTransportService(settings, transport, threadPool);
        TransportService transportService = new MockTransportService(settings, transport, threadPool,
            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
        transportService.start();
        transportService.acceptIncomingRequests();
        DiscoveryNode node =

@ -75,7 +75,7 @@ echo Unknown option "%SERVICE_CMD%"

:displayUsage
echo.
echo Usage: service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
echo Usage: elasticsearch-service.bat install^|remove^|start^|stop^|manager [SERVICE_ID]
goto:eof

:doStart

@ -391,6 +391,7 @@ This command should give you a JSON result:
{
  "name" : "Cp8oag6",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",
  "version" : {
    "number" : "{version}",
    "build_hash" : "f27399d",

@ -403,6 +404,7 @@ This command should give you a JSON result:
--------------------------------------------
// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/]
// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/]
// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/]
// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/]
// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/]
// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/]

@ -16,6 +16,10 @@ Plugins contain JAR files, but may also contain scripts and config files, and
must be installed on every node in the cluster. After installation, each
node must be restarted before the plugin becomes visible.
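
As a minimal sketch of what this looks like in practice, installing an
official plugin on a node typically uses the `elasticsearch-plugin` tool
(the `analysis-icu` plugin name below is purely illustrative, not something
this page prescribes):

[source,sh]
--------------------------------------------------
# run on every node in the cluster, then restart the node
bin/elasticsearch-plugin install analysis-icu
bin/elasticsearch-plugin list
--------------------------------------------------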

NOTE: A full cluster restart is required for installing plugins that have
custom cluster state metadata, such as X-Pack. It is still possible to upgrade
such plugins with a rolling restart.

This documentation distinguishes two categories of plugins:

Core Plugins:: This category identifies plugins that are part of Elasticsearch

@ -3,48 +3,35 @@

The `nodes` command shows the cluster topology.

["source","sh",subs="attributes,callouts"]
[source,sh]
--------------------------------------------------
% curl 192.168.56.10:9200/_cat/nodes
SP4H 4727 192.168.56.30 9300 {version} {jdk} 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h mdi - Boneyard
_uhJ 5134 192.168.56.10 9300 {version} {jdk} 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h mdi * Athena
HfDp 4562 192.168.56.20 9300 {version} {jdk} 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h mdi - Zarek
% GET /_cat/nodes
192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE
192.168.56.10 6 75 14 1.24 2.45 1.37 md  - I8hydUG
192.168.56.20 5 71 12 1.07 1.05 1.11 di  - H5dfFeA
--------------------------------------------------

The first few columns tell you where your nodes live. For sanity it
also tells you what version of ES and the JVM each one runs.

["source","sh",subs="attributes,callouts"]
--------------------------------------------------
nodeId pid  ip            port version   jdk
u2PZ   4234 192.168.56.30 9300 {version} {jdk}
URzf   5443 192.168.56.10 9300 {version} {jdk}
ActN   3806 192.168.56.20 9300 {version} {jdk}
--------------------------------------------------

The next few give a picture of your heap, memory, and load.
The first few columns tell you where your nodes live and give
a picture of your heap, memory, cpu and load.

[source,sh]
--------------------------------------------------
diskAvail heapPercent heapMax ramPercent ramMax  load
72.1gb    31.3        93.9mb  81         239.1mb 0.24
72.1gb    19.6        93.9mb  82         239.1mb 0.05
72.2gb    64.9        93.9mb  84         239.1mb 0.12
ip            heap.percent ram.percent cpu load_1m load_5m load_15m
192.168.56.30 9            78          22  1.80    2.05    2.51
192.168.56.10 6            75          14  1.24    2.45    1.37
192.168.56.20 5            71          12  1.07    1.05    1.11
--------------------------------------------------

The last columns provide ancillary information that can often be
useful when looking at the cluster as a whole, particularly large
ones. How many master-eligible nodes do I have? How many client
nodes? It looks like someone restarted a node recently; which one was
it?
ones. How many master-eligible nodes do I have?

[source,sh]
--------------------------------------------------
uptime node.role master name
3.5h   di        -      Boneyard
3.5h   md        *      Athena
3.5h   i         -      Zarek
node.role master name
mdi       *      bGG90GE
md        -      I8hydUG
di        -      H5dfFeA
--------------------------------------------------

[float]

@ -65,7 +52,7 @@ by default. To have the headers appear in the output, use verbose
mode (`v`). The header name will match the supplied value (e.g.,
`pid` versus `p`). For example:

["source","sh",subs="attributes,callouts"]
[source,sh]
--------------------------------------------------
% curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m
id   ip            port v m

@ -102,13 +89,15 @@ descriptors |123
descriptors percentage |1
|`file_desc.max` |`fdm`, `fileDescriptorMax` |No |Maximum number of file
descriptors |1024
|`load` |`l` |No |Most recent load average |0.22
|`cpu` | |No |Recent system CPU usage as percent |12
|`load_1m` |`l` |No |Most recent load average |0.22
|`load_5m` |`l` |No |Load average for the last five minutes |0.78
|`load_15m` |`l` |No |Load average for the last fifteen minutes |1.24
|`uptime` |`u` |No |Node uptime |17.3m
|`node.role` |`r`, `role`, `nodeRole` |Yes |Master eligible node (m);
Data node (d); Ingest node (i); Coordinating node only (-) |mdi
|`master` |`m` |Yes |Elected master (*); Not elected master (-) |*
|`name` |`n` |Yes |Node name |Venom
|`name` |`n` |Yes |Node name |I8hydUG
|`completion.size` |`cs`, `completionSize` |No |Size of completion |0b
|`fielddata.memory_size` |`fm`, `fielddataMemory` |No |Used fielddata
cache memory |0b

@ -152,6 +141,8 @@ of current indexing operations |0
indexing |134ms
|`indexing.index_total` |`iito`, `indexingIndexTotal` |No |Number of
indexing operations |1
|`indexing.index_failed` |`iif`, `indexingIndexFailed` |No |Number of
failed indexing operations |0
|`merges.current` |`mc`, `mergesCurrent` |No |Number of current
merge operations |0
|`merges.current_docs` |`mcd`, `mergesCurrentDocs` |No |Number of

@ -166,15 +157,6 @@ documents |0
merges |0b
|`merges.total_time` |`mtt`, `mergesTotalTime` |No |Time spent merging
documents |0s
|`percolate.current` |`pc`, `percolateCurrent` |No |Number of current
percolations |0
|`percolate.memory_size` |`pm`, `percolateMemory` |No |Memory used by
current percolations |0b
|`percolate.queries` |`pq`, `percolateQueries` |No |Number of
registered percolation queries |0
|`percolate.time` |`pti`, `percolateTime` |No |Time spent
percolating |0s
|`percolate.total` |`pto`, `percolateTotal` |No |Total percolations |0
|`refresh.total` |`rto`, `refreshTotal` |No |Number of refreshes |16
|`refresh.time` |`rti`, `refreshTime` |No |Time spent in refreshes |91ms
|`script.compilations` |`scrcc`, `scriptCompilations` |No |Total script compilations |17

@ -203,4 +185,9 @@ segments |1.4kb
|Memory used by index writer |18mb
|`segments.version_map_memory` |`svmm`, `segmentsVersionMapMemory` |No
|Memory used by version map |1.0kb
|`segments.fixed_bitset_memory` |`sfbm`, `fixedBitsetMemory` |No
|Memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields |1.0kb
|`suggest.current` |`suc`, `suggestCurrent` |No |Number of current suggest operations |0
|`suggest.time` |`suti`, `suggestTime` |No |Time spent in suggest |0
|`suggest.total` |`suto`, `suggestTotal` |No |Number of suggest operations |0
|=======================================================================

@ -302,6 +302,12 @@ curl -XPUT 'localhost:9200/customer/external/1?pretty' -d '
  "_type" : "external",
  "_id" : "1",
  "_version" : 1,
  "result" : "created",
  "_shards" : {
    "total" : 2,
    "successful" : 1,
    "failed" : 0
  },
  "created" : true
}
--------------------------------------------------

@ -1053,7 +1059,7 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
  "aggs": {
    "group_by_gender": {
      "terms": {
        "field": "gender"
        "field": "gender.keyword"
      },
      "aggs": {
        "average_balance": {

@ -85,7 +85,8 @@ Specify when shard rebalancing is allowed:

`cluster.routing.allocation.cluster_concurrent_rebalance`::

    Controls how many concurrent shard rebalances are
    allowed cluster wide. Defaults to `2`.
    allowed cluster wide. Defaults to `2`. Note that this setting
    only controls the number of concurrent shard relocations due
    to imbalances in the cluster. This setting does not limit shard
    relocations due to <<allocation-filtering,allocation filtering>>
    or <<forced-awareness,forced awareness>>.
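
As a sketch of how the limit is adjusted in practice, the setting is
dynamic and can be updated at runtime through the cluster update settings
API (the value shown here is simply the default):

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
    "transient": {
        "cluster.routing.allocation.cluster_concurrent_rebalance": 2
    }
}
--------------------------------------------------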

[float]
=== Shard Balancing Heuristics

@ -17,13 +17,17 @@ Any `_search` request can be profiled by adding a top-level `profile` parameter:

[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_search' -d '{
GET /_search
{
  "profile": true,<1>
  "query" : {
    "match" : { "message" : "search test" }
    "match" : { "message" : "message number" }
  }
}'
}
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

<1> Setting the top-level `profile` parameter to `true` will enable profiling
for the search

@ -40,70 +44,70 @@ This will yield the following result:
      "failed": 0
   },
   "hits": {
      "total": 3,
      "max_score": 1.078072,
      "hits": [ ... ] <1>
      "total": 4,
      "max_score": 0.5093388,
      "hits": [...] <1>
   },
   "profile": {
     "shards": [
        {
           "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]",
           "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]",
           "searches": [
              {
                 "query": [
                    {
                       "type": "BooleanQuery",
                       "description": "message:search message:test",
                       "time": "15.52889800ms",
                       "description": "message:message message:number",
                       "time": "1.873811000ms",
                       "breakdown": {
                          "score": 6352,
                          "score_count": 1,
                          "build_scorer": 1800776,
                          "score": 51306,
                          "score_count": 4,
                          "build_scorer": 2935582,
                          "build_scorer_count": 1,
                          "match": 0,
                          "match_count": 0,
                          "create_weight": 667400,
                          "create_weight": 919297,
                          "create_weight_count": 1,
                          "next_doc": 10563,
                          "next_doc_count": 2,
                          "next_doc": 53876,
                          "next_doc_count": 5,
                          "advance": 0,
                          "advance_count": 0
                       },
                       "children": [
                          {
                             "type": "TermQuery",
                             "description": "message:search",
                             "time": "4.938855000ms",
                             "description": "message:message",
                             "time": "0.3919430000ms",
                             "breakdown": {
                                "score": 0,
                                "score_count": 0,
                                "build_scorer": 3230,
                                "score": 28776,
                                "score_count": 4,
                                "build_scorer": 784451,
                                "build_scorer_count": 1,
                                "match": 0,
                                "match_count": 0,
                                "create_weight": 415612,
                                "create_weight": 1669564,
                                "create_weight_count": 1,
                                "next_doc": 0,
                                "next_doc_count": 0,
                                "next_doc": 10111,
                                "next_doc_count": 5,
                                "advance": 0,
                                "advance_count": 0
                             }
                          },
                          {
                             "type": "TermQuery",
                             "description": "message:test",
                             "time": "0.5016660000ms",
                             "description": "message:number",
                             "time": "0.2106820000ms",
                             "breakdown": {
                                "score": 5014,
                                "score_count": 1,
                                "build_scorer": 1689333,
                                "score": 4552,
                                "score_count": 4,
                                "build_scorer": 42602,
                                "build_scorer_count": 1,
                                "match": 0,
                                "match_count": 0,
                                "create_weight": 166587,
                                "create_weight": 89323,
                                "create_weight_count": 1,
                                "next_doc": 5542,
                                "next_doc_count": 2,
                                "next_doc": 2852,
                                "next_doc_count": 5,
                                "advance": 0,
                                "advance_count": 0
                             }

@ -111,21 +115,44 @@ This will yield the following result:
                 ]
              }
           ],
           "rewrite_time": 870954,
           "rewrite_time": 51443,
           "collector": [
              {
                 "name": "SimpleTopScoreDocCollector",
                 "reason": "search_top_hits",
                 "time": "0.009783000000ms"
                 "time": "0.06989100000ms"
              }
           ]
        }
     ]
     ],
     "aggregations": []
     }
    ]
   }
}
--------------------------------------------------
// TESTRESPONSE[s/"took": 25/"took": $body.took/]
// TESTRESPONSE[s/"hits": \[...\]/"hits": $body.hits.hits/]
// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/]
// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/]
// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/]
// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.3919430000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.0.time/]
// TESTRESPONSE[s/"score": 28776/"score": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.score/]
// TESTRESPONSE[s/"build_scorer": 784451/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 1669564/"create_weight": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 10111/"next_doc": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.2106820000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.1.time/]
// TESTRESPONSE[s/"score": 4552/"score": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.score/]
// TESTRESPONSE[s/"build_scorer": 42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/]
// Sorry for this mess....

<1> Search results are returned, but were omitted here for brevity

Even for a simple query, the response is relatively complicated. Let's break it down piece-by-piece before moving

@ -139,11 +166,11 @@ First, the overall structure of the profile response is as follows:
   "profile": {
        "shards": [
           {
              "id": "[2aE02wS1R8q_QFnYu6vDVQ][test][1]", <1>
              "id": "[2aE02wS1R8q_QFnYu6vDVQ][twitter][1]", <1>
              "searches": [
                 {
                    "query": [...], <2>
                    "rewrite_time": 870954, <3>
                    "rewrite_time": 51443, <3>
                    "collector": [...] <4>
                 }
              ],

@ -153,6 +180,12 @@ First, the overall structure of the profile response is as follows:
   }
}
--------------------------------------------------
// TESTRESPONSE[s/"profile": /"took": $body.took, "timed_out": $body.timed_out, "_shards": $body._shards, "hits": $body.hits, "profile": /]
// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/]
// TESTRESPONSE[s/"query": \[...\]/"query": $body.profile.shards.0.searches.0.query/]
// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/]
// TESTRESPONSE[s/"collector": \[...\]/"collector": $body.profile.shards.0.searches.0.collector/]
// TESTRESPONSE[s/"aggregations": \[...\]/"aggregations": []/]
<1> A profile is returned for each shard that participated in the response, and is identified
by a unique ID
<2> Each profile contains a section which holds details about the query execution

@ -195,33 +228,38 @@ the `advance` phase of that query is the cause, for example.
The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard.
The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly
(or sometimes very) different. It will also use similar but not always identical naming. Using our previous
`term` query example, let's analyze the `query` section:
`match` query example, let's analyze the `query` section:

[source,js]
--------------------------------------------------
"query": [
    {
       "type": "BooleanQuery",
       "description": "message:search message:test",
       "time": "15.52889800ms",
       "description": "message:message message:number",
       "time": "1.873811000ms",
       "breakdown": {...}, <1>
       "children": [
          {
             "type": "TermQuery",
             "description": "message:search",
             "time": "4.938855000ms",
             "description": "message:message",
             "time": "0.3919430000ms",
             "breakdown": {...}
          },
          {
             "type": "TermQuery",
             "description": "message:test",
             "time": "0.5016660000ms",
             "description": "message:number",
             "time": "0.2106820000ms",
             "breakdown": {...}
          }
       ]
    }
]
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n/]
// TESTRESPONSE[s/]$/],"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"time": "1.873811000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.breakdown/]
// TESTRESPONSE[s/"time": "0.3919430000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.0.breakdown/]
// TESTRESPONSE[s/"time": "0.2106820000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.1.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.1.breakdown/]
<1> The breakdown timings are omitted for simplicity

Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two

@ -245,20 +283,27 @@ The `"breakdown"` component lists detailed timing statistics about low-level Lucene execution:

[source,js]
--------------------------------------------------
"breakdown": {
   "score": 5014,
   "score_count": 1,
   "build_scorer": 1689333,
   "build_scorer_count": 1,
   "match": 0,
   "match_count": 0,
   "create_weight": 166587,
   "create_weight_count": 1,
   "next_doc": 5542,
   "next_doc_count": 2,
   "advance": 0,
   "advance_count": 0
   "score": 51306,
   "score_count": 4,
   "build_scorer": 2935582,
   "build_scorer_count": 1,
   "match": 0,
   "match_count": 0,
   "create_weight": 919297,
   "create_weight_count": 1,
   "next_doc": 53876,
   "next_doc_count": 5,
   "advance": 0,
   "advance_count": 0
}
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time": $body.profile.shards.0.searches.0.query.0.time,/]
// TESTRESPONSE[s/}$/},\n"children": $body.profile.shards.0.searches.0.query.0.children}],\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/]
// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]

Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall
`"time"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is

@ -348,10 +393,13 @@ Looking at the previous example:
    {
        "name": "SimpleTopScoreDocCollector",
        "reason": "search_top_hits",
        "time": "2.206529000ms"
        "time": "0.06989100000ms"
    }
]
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/]
// TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/]

We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector
used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The

@ -473,6 +521,8 @@ GET /test/_search
  }
}
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT test\n/]

This example has:

@ -509,7 +559,7 @@ And the response:
   "create_weight_count": 1,
   "build_scorer": 377872,
   "build_scorer_count": 1,
   "advance": 0
   "advance": 0,
   "advance_count": 0
}
},

@ -528,7 +578,7 @@ And the response:
   "create_weight_count": 1,
   "build_scorer": 112551,
   "build_scorer_count": 1,
   "advance": 0
   "advance": 0,
   "advance_count": 0
}
}

@ -578,7 +628,7 @@ And the response:
   "create_weight_count": 1,
   "build_scorer": 38310,
   "build_scorer_count": 1,
   "advance": 0
   "advance": 0,
   "advance_count": 0
}
}

@ -640,7 +690,7 @@ the following example aggregations request:

[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/house-prices/_search" -d'
GET /house-prices/_search
{
  "profile": true,
  "size": 0,

@ -658,8 +708,10 @@ curl -XGET "http://localhost:9200/house-prices/_search" -d'
  }
}
}
}'
}
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT house-prices\n/]

Which yields the following aggregation profile output

@ -351,9 +351,23 @@ Multiple geo points can be passed as an array containing any `geo_point` format,

[source,js]
--------------------------------------------------
"pin.location" : [[-70, 40], [-71, 42]]
"pin.location" : [{"lat": 40, "lon": -70}, {"lat": 42, "lon": -71}]
GET /_search
{
    "sort" : [
        {
            "_geo_distance" : {
                "pin.location" : [[-70, 40], [-71, 42]],
                "order" : "asc",
                "unit" : "km"
            }
        }
    ],
    "query" : {
        "term" : { "user" : "kimchy" }
    }
}
--------------------------------------------------
// CONSOLE

and so forth.

@ -16,6 +16,7 @@ which should give you a response something like this:
{
  "name" : "Cp8oag6",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",
  "version" : {
    "number" : "{version}",
    "build_hash" : "f27399d",

@ -28,6 +29,7 @@ which should give you a response something like this:
--------------------------------------------
// TESTRESPONSE[s/"name" : "Cp8oag6",/"name" : "$body.name",/]
// TESTRESPONSE[s/"cluster_name" : "elasticsearch",/"cluster_name" : "$body.cluster_name",/]
// TESTRESPONSE[s/"cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA",/"cluster_uuid" : "$body.cluster_uuid",/]
// TESTRESPONSE[s/"build_hash" : "f27399d",/"build_hash" : "$body.version.build_hash",/]
// TESTRESPONSE[s/"build_date" : "2016-03-30T09:51:41.449Z",/"build_date" : $body.version.build_date,/]
// TESTRESPONSE[s/"build_snapshot" : false,/"build_snapshot" : $body.version.build_snapshot,/]

@ -2,7 +2,7 @@
=== Install Elasticsearch on Windows

Elasticsearch can be installed on Windows using the `.zip` package. This
comes with a `service.bat` command which will set up Elasticsearch to run as a
comes with an `elasticsearch-service.bat` command which will set up Elasticsearch to run as a
service.

The latest stable version of Elasticsearch can be found on the

@ -13,7 +13,7 @@ link:/downloads/past-releases[Past Releases page].
[[install-windows]]
==== Download and install the `.zip` package

Download the `.zip` archive for Elastisearch v{version} from: https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip
Download the `.zip` archive for Elasticsearch v{version} from: https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/{version}/elasticsearch-{version}.zip

Unzip it with your favourite unzip tool. This will create a folder called
+elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal

@ -65,7 +65,7 @@ include::check-running.asciidoc[]

Elasticsearch can be installed as a service to run in the background or start
automatically at boot time without any user interaction. This can be achieved
through the `service.bat` script in the `bin\` folder which allows one to
through the `elasticsearch-service.bat` script in the `bin\` folder which allows one to
install, remove, manage or configure the service and potentially start and
stop the service, all from the command-line.
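
For instance, installing and then starting the service from a terminal might
look like the following (an illustrative session only; output is omitted):

[source,sh]
--------------------------------------------------
c:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat install
c:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat start
--------------------------------------------------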
|
||||
|
||||
|
@ -73,7 +73,7 @@ stop the service, all from the command-line.
|
|||
--------------------------------------------------
|
||||
c:\elasticsearch-{version}{backslash}bin>service
|
||||
|
||||
Usage: service.bat install|remove|start|stop|manager [SERVICE_ID]
|
||||
Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID]
|
||||
--------------------------------------------------
|
||||
|
||||
The script requires one parameter (the command to execute) followed by an
|
||||
|
@@ -170,18 +170,18 @@ The Elasticsearch service can be configured prior to installation by setting the

The timeout in seconds that procrun waits for the service to exit gracefully. Defaults to `0`.

NOTE: At its core, `service.bat` relies on the http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project
NOTE: At its core, `elasticsearch-service.bat` relies on the http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project
to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled.

NOTE: On Windows, the <<heap-size,heap size>> can be configured as for
any other Elasticsearch installation when running Elasticsearch from the
command line, or when installing Elasticsearch as a service for the
first time. To adjust the heap size for an already installed service,
use the service manager: `bin\service.bat manager`.
use the service manager: `bin\elasticsearch-service.bat manager`.

Using the Manager GUI::

It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `service.bat manager` from the command-line will open up the manager window:
It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window:

image::images/service-manager-win.png["Windows Service Manager GUI",align="center"]
@@ -63,6 +63,22 @@ to create new scenarios. We have currently ported all published Jepsen scenarios
framework. As the Jepsen tests evolve, we will continue porting new scenarios that are not covered yet. We are committed to investigating
all new scenarios and will report issues that we find on this page and in our GitHub repository.
[float]
=== Repeated network partitions can cause cluster state updates to be lost (STATUS: ONGOING)

During a network partition, cluster state updates (like mapping changes or shard assignments)
are committed once a majority of the master-eligible nodes have received the update correctly. This means that the current master has access
to enough nodes in the cluster to continue to operate correctly. When the network partition heals, the isolated nodes catch
up with the current state and receive the previously missed changes. However, if a second partition happens while the cluster
is still recovering from the previous one *and* the old master falls on the minority side, it may be that a new master is elected
which has not yet caught up. If that happens, cluster state updates can be lost.
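The majority rule described above is worth pinning down, since it is exactly the invariant that an ill-timed second partition can violate for an in-flight update. Below is a minimal sketch of the quorum check, with illustrative names of our own rather than Elasticsearch's actual implementation:

[source,java]
--------------------------------------------------
// Illustrative sketch of the commit rule; not Elasticsearch code.
final class QuorumSketch {

    /** An update may be committed only once a strict majority of the
     *  master-eligible nodes have acknowledged it. */
    static boolean canCommit(int masterEligibleNodes, int acks) {
        return acks > masterEligibleNodes / 2;
    }

    public static void main(String[] args) {
        System.out.println(canCommit(5, 3)); // true: 3 of 5 is a majority
        System.out.println(canCommit(5, 2)); // false: the commit must wait
    }
}
--------------------------------------------------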
This problem is mostly fixed by {GIT}20384[#20384] (v5.0.0), which takes committed cluster state updates into account during master
election. This considerably reduces the chance of this rare problem occurring but does not fully mitigate it. If the second partition
happens concurrently with a cluster state update and blocks the cluster state commit message from reaching a majority of nodes, it may be
that the in-flight update will be lost. If the now-isolated master can still acknowledge the cluster state update to the client, this
will amount to the loss of an acknowledged change. Fixing that last scenario needs considerable work and is currently targeted at v6.0.0.

[float]
=== Better request retry mechanism when nodes are disconnected (STATUS: ONGOING)
@@ -294,6 +294,7 @@ final class RemoteResponseParsers {
MAIN_ACTION_PARSER.declareInt((p, v) -> {}, new ParseField("status"));
MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name"));
MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_name"));
MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_uuid"));
MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("tagline"));
MAIN_ACTION_PARSER.declareObject(constructorArg(), VERSION_PARSER, new ParseField("version"));
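For readers skimming this hunk: the `(p, v) -> {}` consumers declare fields that the remote response is expected to contain but that this parser deliberately discards, so known-but-unneeded fields are tolerated without storing anything; `version` is the only value handed to the constructor. A self-contained sketch of that declare-and-discard idiom, with names of our own invention rather than the actual parser framework:

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;

// Sketch only: a toy registry that mimics declareString(consumer, field).
final class DeclareAndDiscard {

    static final class Main {
        String clusterUuid; // the only field this sketch actually keeps
    }

    private static final Map<String, BiConsumer<Main, String>> FIELDS = new HashMap<>();

    static void declareString(BiConsumer<Main, String> consumer, String name) {
        FIELDS.put(name, consumer);
    }

    public static void main(String[] args) {
        declareString((m, v) -> {}, "tagline");                     // accepted, then ignored
        declareString((m, v) -> m.clusterUuid = v, "cluster_uuid"); // accepted and stored

        Main main = new Main();
        FIELDS.get("tagline").accept(main, "You Know, for Search");
        FIELDS.get("cluster_uuid").accept(main, "abc123");
        System.out.println(main.clusterUuid); // prints abc123
    }
}
--------------------------------------------------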
@@ -67,7 +67,8 @@ public class Netty3SizeHeaderFrameDecoderTests extends ESTestCase {
nettyTransport = new Netty3Transport(settings, threadPool, networkService, bigArrays,
    new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
nettyTransport.start();
TransportService transportService = new TransportService(settings, nettyTransport, threadPool);
TransportService transportService = new TransportService(settings, nettyTransport, threadPool,
    TransportService.NOOP_TRANSPORT_INTERCEPTOR);
nettyTransport.transportServiceAdapter(transportService.createAdapter());

TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses();
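This hunk, and the transport test hunks that follow, are one mechanical migration: the `TransportService` and `MockTransportService` constructors gained an interceptor argument, and call sites that need no request interception pass `TransportService.NOOP_TRANSPORT_INTERCEPTOR`. A minimal sketch of the call-site change, assuming the surrounding test already provides the settings, transport, and thread pool:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;

// Sketch of the migration applied throughout these hunks.
final class TransportServiceMigration {

    static TransportService migrate(Settings settings, Transport transport, ThreadPool threadPool) {
        // Before: new TransportService(settings, transport, threadPool);
        // After: the extra argument is the shared no-op interceptor, for
        // call sites that do not wrap request handling.
        return new TransportService(settings, transport, threadPool,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR);
    }
}
--------------------------------------------------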
@@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;

import java.io.IOException;

@@ -65,13 +66,13 @@ public class Netty3ScheduledPingTests extends ESTestCase {
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
final Netty3Transport nettyA = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
    BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceA.start();
serviceA.acceptIncomingRequests();

final Netty3Transport nettyB = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
    BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);

serviceB.start();
serviceB.acceptIncomingRequests();
@@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;

import java.net.InetAddress;

@@ -55,7 +56,7 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase
        return version;
    }
};
return new MockTransportService(Settings.EMPTY, transport, threadPool);
return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}

@Override
@@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;

import java.io.IOException;

@@ -65,13 +66,13 @@ public class Netty4ScheduledPingTests extends ESTestCase {
NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
    BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
serviceA.start();
serviceA.acceptIncomingRequests();

final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
    BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);

serviceB.start();
serviceB.acceptIncomingRequests();
@@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;

import java.net.InetAddress;

@@ -55,7 +56,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
        return version;
    }
};
return new MockTransportService(Settings.EMPTY, transport, threadPool);
return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}

@Override
@@ -31,6 +31,7 @@ import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.MockTcpTransport;
import org.elasticsearch.transport.TransportService;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

@@ -73,7 +74,7 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase {
    new NoneCircuitBreakerService(),
    new NamedWriteableRegistry(Collections.emptyList()),
    new NetworkService(Settings.EMPTY, Collections.emptyList()));
transportService = new MockTransportService(Settings.EMPTY, transport, threadPool);
transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
}

public void testBuildDynamicNodes() throws Exception {
@@ -27,6 +27,7 @@ import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;

@@ -34,7 +35,9 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.file.Path;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.notNullValue;

public class EvilLoggerConfigurationTests extends ESTestCase {

@@ -85,7 +88,7 @@ public class EvilLoggerConfigurationTests extends ESTestCase {
    }
}

public void testDefaults() throws IOException {
public void testDefaults() throws IOException, UserException {
    final Path configDir = getDataPath("config");
    final String level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR).toString();
    final Settings settings = Settings.builder()

@@ -137,4 +140,15 @@ public class EvilLoggerConfigurationTests extends ESTestCase {
    assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level));
}

public void testMissingConfigFile() {
    final Path configDir = getDataPath("does_not_exist");
    final Settings settings = Settings.builder()
        .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    final Environment environment = new Environment(settings);
    UserException e = expectThrows(UserException.class, () -> LogConfigurator.configure(environment, true));
    assertThat(e, hasToString(containsString("no log4j2.properties found; tried")));
}

}
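The new `testMissingConfigFile` leans on the `expectThrows` helper that the test framework provides: run a block of code, assert that it throws the expected type, and return the caught exception so the test can make further assertions on it. An illustrative stand-in for that helper, not the framework's actual implementation:

[source,java]
--------------------------------------------------
// Illustrative stand-in for the test framework's expectThrows helper.
final class ExpectThrows {

    @FunctionalInterface
    interface ThrowingRunnable {
        void run() throws Throwable;
    }

    static <T extends Throwable> T expectThrows(Class<T> expected, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t); // handed back for further assertions
            }
            throw new AssertionError("expected " + expected.getSimpleName()
                    + " but got " + t.getClass().getSimpleName(), t);
        }
        throw new AssertionError("expected " + expected.getSimpleName()
                + " but nothing was thrown");
    }
}
--------------------------------------------------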
@@ -28,6 +28,7 @@ import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.appender.CountingNoOpAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

@@ -59,7 +60,7 @@ public class EvilLoggerTests extends ESTestCase {
    super.tearDown();
}

public void testLocationInfoTest() throws IOException {
public void testLocationInfoTest() throws IOException, UserException {
    setupLogging("location_info");

    final Logger testLogger = ESLoggerFactory.getLogger("test");

@@ -81,7 +82,7 @@ public class EvilLoggerTests extends ESTestCase {
    assertLogLine(events.get(4), Level.TRACE, location, "This is a trace message");
}

public void testDeprecationLogger() throws IOException {
public void testDeprecationLogger() throws IOException, UserException {
    setupLogging("deprecation");

    final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger("deprecation"));

@@ -97,7 +98,7 @@ public class EvilLoggerTests extends ESTestCase {
        "This is a deprecation message");
}

public void testFindAppender() throws IOException {
public void testFindAppender() throws IOException, UserException {
    setupLogging("find_appender");

    final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender");

@@ -111,7 +112,7 @@ public class EvilLoggerTests extends ESTestCase {
    assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op"));
}

public void testPrefixLogger() throws IOException, IllegalAccessException {
public void testPrefixLogger() throws IOException, IllegalAccessException, UserException {
    setupLogging("prefix");

    final String prefix = randomBoolean() ? null : randomAsciiOfLength(16);

@@ -179,7 +180,7 @@ public class EvilLoggerTests extends ESTestCase {
    }
}

private void setupLogging(final String config) throws IOException {
private void setupLogging(final String config) throws IOException, UserException {
    final Path configDir = getDataPath(config);
    // need to set custom path.conf so we can use a custom log4j2.properties file for the test
    final Settings settings = Settings.builder()
Some files were not shown because too many files have changed in this diff.