Merge branch 'master' into feature/aggs-refactoring

# Conflicts:
#	core/src/test/java/org/elasticsearch/search/aggregations/AggregationsBinaryIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
#	core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java
#	modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java
#	modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java
Colin Goodheart-Smithe committed 2016-02-01 13:38:33 +00:00
commit a241983c9d
322 changed files with 6358 additions and 4365 deletions

@@ -61,10 +61,6 @@ public class ForbiddenPatternsTask extends DefaultTask {
         // add mandatory rules
         patterns.put('nocommit', /nocommit/)
         patterns.put('tab', /\t/)
-        patterns.put('wildcard imports', /^\s*import.*\.\*/)
-        // We don't use Java serialization so we fail if it looks like we're trying to.
-        patterns.put('declares serialVersionUID', /serialVersionUID/)
-        patterns.put('references Serializable', /java\.io\.Serializable/)
 
         inputs.property("excludes", filesFilter.excludes)
         inputs.property("rules", patterns)

@@ -30,9 +30,9 @@ class PrecommitTasks {
     /** Adds a precommit task, which depends on non-test verification tasks. */
     public static Task create(Project project, boolean includeDependencyLicenses) {
         List<Task> precommitTasks = [
             configureForbiddenApis(project),
+            configureCheckstyle(project),
             project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
             project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
             project.tasks.create('jarHell', JarHellTask.class),
@@ -83,4 +83,25 @@ class PrecommitTasks {
         forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
         return forbiddenApis
     }
+
+    private static Task configureCheckstyle(Project project) {
+        Task checkstyleTask = project.tasks.create('checkstyle')
+        // Apply the checkstyle plugin to create `checkstyleMain` and `checkstyleTest`. It only
+        // creates them if there is main or test code to check and it makes `check` depend
+        // on them. But we want `precommit` to depend on `checkstyle` which depends on them so
+        // we have to swap them.
+        project.pluginManager.apply('checkstyle')
+        project.checkstyle {
+            config = project.resources.text.fromFile(
+                PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
+        }
+        for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
+            Task task = project.tasks.findByName(taskName)
+            if (task != null) {
+                project.tasks['check'].dependsOn.remove(task)
+                checkstyleTask.dependsOn(task)
+            }
+        }
+        return checkstyleTask
+    }
 }

@ -0,0 +1,53 @@
<?xml version="1.0"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
<property name="charset" value="UTF-8" />
<module name="TreeWalker">
<!-- ~3500 violations
<module name="LineLength">
<property name="max" value="140"/>
</module>
-->
<module name="AvoidStarImport" />
<!-- Doesn't pass but we could make it pass pretty quick.
<module name="UnusedImports">
The next property is optional. If we remove it then imports that are
only referenced by Javadoc cause the check to fail.
<property name="processJavadoc" value="true" />
</module>
-->
<!-- Non-inner classes must be in files that match their names. -->
<module name="OuterTypeFilename" />
<!-- No line wraps inside of import and package statements. -->
<module name="NoLineWrap" />
<!-- Each java file has only one outer class -->
<module name="OneTopLevelClass" />
<!-- The suffix L is preferred, because the letter l (ell) is often
hard to distinguish from the digit 1 (one). -->
<module name="UpperEll"/>
<!-- We don't use Java's builtin serialization and we suppress all warning
about it. The flip side of that coin is that we shouldn't _try_ to use
it. We can't outright ban it with ForbiddenApis because it complain about
every we reference a class that implements Serializable like String or
Exception.
-->
<module name="RegexpSinglelineJava">
<property name="format" value="serialVersionUID" />
<property name="message" value="Do not declare serialVersionUID." />
<property name="ignoreComments" value="true" />
</module>
<module name="RegexpSinglelineJava">
<property name="format" value="java\.io\.Serializable" />
<property name="message" value="References java.io.Serializable." />
<property name="ignoreComments" value="true" />
</module>
<!-- end Orwellian suppression of Serializable -->
</module>
</module>
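
The two RegexpSinglelineJava rules above pick up the serialVersionUID and java.io.Serializable bans that the first hunk removed from ForbiddenPatternsTask, and UpperEll enforces the lowercase-ell cleanups that show up in several hunks below. A minimal sketch of what these checks reject and accept (a hypothetical class, not part of the commit):

    // Hypothetical examples of lines the new checkstyle rules act on.
    public class CheckstyleExamples {
        // Rejected by RegexpSinglelineJava: declares serialVersionUID.
        // private static final long serialVersionUID = 1L;

        // Rejected by UpperEll: the lowercase 'l' suffix reads like the digit 1.
        // long timeout = 0l;

        // Accepted: the uppercase 'L' suffix is unambiguous.
        long timeout = 0L;
    }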

@@ -139,24 +139,24 @@ public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {
     }
 
     static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
-        Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(config, Fields.PIPELINE);
+        Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
         Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactoryRegistry());
         List<IngestDocument> ingestDocumentList = parseDocs(config);
         return new Parsed(pipeline, ingestDocumentList, verbose);
     }
 
     private static List<IngestDocument> parseDocs(Map<String, Object> config) {
-        List<Map<String, Object>> docs = ConfigurationUtils.readList(config, Fields.DOCS);
+        List<Map<String, Object>> docs = ConfigurationUtils.readList(null, null, config, Fields.DOCS);
         List<IngestDocument> ingestDocumentList = new ArrayList<>();
         for (Map<String, Object> dataMap : docs) {
-            Map<String, Object> document = ConfigurationUtils.readMap(dataMap, Fields.SOURCE);
-            IngestDocument ingestDocument = new IngestDocument(ConfigurationUtils.readStringProperty(dataMap, MetaData.INDEX.getFieldName(), "_index"),
-                    ConfigurationUtils.readStringProperty(dataMap, MetaData.TYPE.getFieldName(), "_type"),
-                    ConfigurationUtils.readStringProperty(dataMap, MetaData.ID.getFieldName(), "_id"),
-                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.ROUTING.getFieldName()),
-                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.PARENT.getFieldName()),
-                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.TIMESTAMP.getFieldName()),
-                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.TTL.getFieldName()),
-                    document);
+            Map<String, Object> document = ConfigurationUtils.readMap(null, null, dataMap, Fields.SOURCE);
+            IngestDocument ingestDocument = new IngestDocument(ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.INDEX.getFieldName(), "_index"),
+                    ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.TYPE.getFieldName(), "_type"),
+                    ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.ID.getFieldName(), "_id"),
+                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.ROUTING.getFieldName()),
+                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.PARENT.getFieldName()),
+                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.TIMESTAMP.getFieldName()),
+                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.TTL.getFieldName()),
+                    document);
             ingestDocumentList.add(ingestDocument);
         }

@@ -22,24 +22,31 @@ package org.elasticsearch.action.ingest;
 
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.ingest.core.PipelineFactoryError;
+import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
+public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
     private String pipelineId;
     private boolean verbose;
     private List<SimulateDocumentResult> results;
+    private PipelineFactoryError error;
 
     public SimulatePipelineResponse() {
     }
 
+    public SimulatePipelineResponse(PipelineFactoryError error) {
+        this.error = error;
+    }
+
     public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
         this.pipelineId = pipelineId;
         this.verbose = verbose;
@@ -58,42 +65,69 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
         return verbose;
     }
 
+    public boolean isError() {
+        return error != null;
+    }
+
+    @Override
+    public RestStatus status() {
+        if (isError()) {
+            return RestStatus.BAD_REQUEST;
+        }
+        return RestStatus.OK;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeString(pipelineId);
-        out.writeBoolean(verbose);
-        out.writeVInt(results.size());
-        for (SimulateDocumentResult response : results) {
-            response.writeTo(out);
+        out.writeBoolean(isError());
+        if (isError()) {
+            error.writeTo(out);
+        } else {
+            out.writeString(pipelineId);
+            out.writeBoolean(verbose);
+            out.writeVInt(results.size());
+            for (SimulateDocumentResult response : results) {
+                response.writeTo(out);
+            }
         }
     }
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        this.pipelineId = in.readString();
-        boolean verbose = in.readBoolean();
-        int responsesLength = in.readVInt();
-        results = new ArrayList<>();
-        for (int i = 0; i < responsesLength; i++) {
-            SimulateDocumentResult<?> simulateDocumentResult;
-            if (verbose) {
-                simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
-            } else {
-                simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
-            }
-            results.add(simulateDocumentResult);
+        boolean isError = in.readBoolean();
+        if (isError) {
+            error = new PipelineFactoryError();
+            error.readFrom(in);
+        } else {
+            this.pipelineId = in.readString();
+            boolean verbose = in.readBoolean();
+            int responsesLength = in.readVInt();
+            results = new ArrayList<>();
+            for (int i = 0; i < responsesLength; i++) {
+                SimulateDocumentResult<?> simulateDocumentResult;
+                if (verbose) {
+                    simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
+                } else {
+                    simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
+                }
+                results.add(simulateDocumentResult);
+            }
         }
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(Fields.DOCUMENTS);
-        for (SimulateDocumentResult response : results) {
-            response.toXContent(builder, params);
-        }
-        builder.endArray();
+        if (isError()) {
+            error.toXContent(builder, params);
+        } else {
+            builder.startArray(Fields.DOCUMENTS);
+            for (SimulateDocumentResult response : results) {
+                response.toXContent(builder, params);
+            }
+            builder.endArray();
+        }
         return builder;
     }

@@ -27,6 +27,8 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.ingest.PipelineStore;
+import org.elasticsearch.ingest.core.PipelineFactoryError;
+import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
 import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -56,6 +58,9 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<SimulatePipelineRequest, SimulatePipelineResponse> {
             } else {
                 simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
             }
+        } catch (ConfigurationPropertyException e) {
+            listener.onResponse(new SimulatePipelineResponse(new PipelineFactoryError(e)));
+            return;
         } catch (Exception e) {
             listener.onFailure(e);
             return;

@@ -22,27 +22,49 @@ package org.elasticsearch.action.ingest;
 
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.ingest.core.PipelineFactoryError;
 
 import java.io.IOException;
 
 public class WritePipelineResponse extends AcknowledgedResponse {
+    private PipelineFactoryError error;
 
     WritePipelineResponse() {
     }
 
-    public WritePipelineResponse(boolean acknowledge) {
-        super(acknowledge);
+    public WritePipelineResponse(boolean acknowledged) {
+        super(acknowledged);
+        if (!isAcknowledged()) {
+            error = new PipelineFactoryError("pipeline write is not acknowledged");
+        }
+    }
+
+    public WritePipelineResponse(PipelineFactoryError error) {
+        super(false);
+        this.error = error;
+    }
+
+    public PipelineFactoryError getError() {
+        return error;
     }
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         readAcknowledged(in);
+        if (!isAcknowledged()) {
+            error = new PipelineFactoryError();
+            error.readFrom(in);
+        }
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         writeAcknowledged(out);
+        if (!isAcknowledged()) {
+            error.writeTo(out);
+        }
     }
 }

@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import java.io.IOException;
public class WritePipelineResponseRestListener extends AcknowledgedRestListener<WritePipelineResponse> {
public WritePipelineResponseRestListener(RestChannel channel) {
super(channel);
}
@Override
protected void addCustomFields(XContentBuilder builder, WritePipelineResponse response) throws IOException {
if (!response.isAcknowledged()) {
response.getError().toXContent(builder, null);
}
}
}

@@ -320,7 +320,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest
             public void onFailure(Throwable t) {
                 if (t instanceof RetryOnReplicaException) {
                     logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
-                    final ThreadContext threadContext = threadPool.getThreadContext();
                     final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
                     observer.waitForNextChange(new ClusterStateObserver.Listener() {
                         @Override
@@ -528,7 +527,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest
                 finishAsFailed(failure);
                 return;
             }
-            final ThreadContext threadContext = threadPool.getThreadContext();
             final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
             observer.waitForNextChange(new ClusterStateObserver.Listener() {
                 @Override
@@ -898,7 +896,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest
                 onReplicaFailure(nodeId, exp);
             } else {
                 String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
-                logger.warn("{} {}", exp, shardId, message);
+                logger.warn("[{}] {}", exp, shardId, message);
                 shardStateAction.shardFailed(
                     shard,
                     indexUUID,

@@ -20,7 +20,9 @@
 package org.elasticsearch.bootstrap;
 
 import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
@@ -40,6 +42,7 @@ import org.elasticsearch.node.Node;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.file.Path;
 import java.util.Locale;
@@ -114,7 +117,11 @@ final class Bootstrap {
             public boolean handle(int code) {
                 if (CTRL_CLOSE_EVENT == code) {
                     logger.info("running graceful exit on windows");
-                    Bootstrap.stop();
+                    try {
+                        Bootstrap.stop();
+                    } catch (IOException e) {
+                        throw new ElasticsearchException("failed to stop node", e);
+                    }
                     return true;
                 }
                 return false;
@@ -153,8 +160,10 @@ final class Bootstrap {
             Runtime.getRuntime().addShutdownHook(new Thread() {
                 @Override
                 public void run() {
-                    if (node != null) {
-                        node.close();
+                    try {
+                        IOUtils.close(node);
+                    } catch (IOException ex) {
+                        throw new ElasticsearchException("failed to stop node", ex);
                     }
                 }
             });
@@ -221,9 +230,9 @@ final class Bootstrap {
             keepAliveThread.start();
     }
 
-    static void stop() {
+    static void stop() throws IOException {
         try {
-            Releasables.close(INSTANCE.node);
+            IOUtils.close(INSTANCE.node);
         } finally {
             INSTANCE.keepAliveLatch.countDown();
         }

@@ -19,6 +19,8 @@
 package org.elasticsearch.bootstrap;
 
+import java.io.IOException;
+
 /**
  * This class starts elasticsearch.
  */
@@ -48,7 +50,7 @@ public final class Elasticsearch {
      *
      * NOTE: If this method is renamed and/or moved, make sure to update service.bat!
      */
-    static void close(String[] args) {
+    static void close(String[] args) throws IOException {
         Bootstrap.stop();
     }
 }

@@ -25,7 +25,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.http.netty.NettyHttpServerTransport;
+import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.plugins.PluginInfo;
 import org.elasticsearch.transport.TransportSettings;
 
@@ -270,9 +270,7 @@ final class Security {
     static void addBindPermissions(Permissions policy, Settings settings) throws IOException {
         // http is simple
-        String httpRange = settings.get("http.netty.port",
-                settings.get("http.port",
-                        NettyHttpServerTransport.DEFAULT_PORT_RANGE));
+        String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
         // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
         // see SocketPermission implies() code
         policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));

@@ -22,9 +22,12 @@ package org.elasticsearch.cache.recycler;
 
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.recycler.AbstractRecyclerC;
 import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -38,17 +41,22 @@ import static org.elasticsearch.common.recycler.Recyclers.dequeFactory;
 import static org.elasticsearch.common.recycler.Recyclers.none;
 
 /** A recycler of fixed-size pages. */
-public class PageCacheRecycler extends AbstractComponent {
+public class PageCacheRecycler extends AbstractComponent implements Releasable {
 
-    public static final String TYPE = "recycler.page.type";
-    public static final String LIMIT_HEAP = "recycler.page.limit.heap";
-    public static final String WEIGHT = "recycler.page.weight";
+    public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
+    // object pages are less useful to us so we give them a lower weight by default
+    public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
 
     private final Recycler<byte[]> bytePage;
     private final Recycler<int[]> intPage;
     private final Recycler<long[]> longPage;
     private final Recycler<Object[]> objectPage;
 
+    @Override
     public void close() {
         bytePage.close();
         intPage.close();
@@ -71,8 +79,8 @@ public class PageCacheRecycler extends AbstractComponent {
     @Inject
     public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
         super(settings);
-        final Type type = Type.parse(settings.get(TYPE));
-        final long limit = settings.getAsMemory(LIMIT_HEAP, "10%").bytes();
+        final Type type = TYPE_SETTING.get(settings);
+        final long limit = LIMIT_HEAP_SETTING.get(settings).bytes();
         final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
         final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
@@ -89,11 +97,10 @@ public class PageCacheRecycler extends AbstractComponent {
         // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
         // that would need to be addressed such as garbage collection of native memory or safety
         // of Unsafe writes.
-        final double bytesWeight = settings.getAsDouble(WEIGHT + ".bytes", 1d);
-        final double intsWeight = settings.getAsDouble(WEIGHT + ".ints", 1d);
-        final double longsWeight = settings.getAsDouble(WEIGHT + ".longs", 1d);
-        // object pages are less useful to us so we give them a lower weight by default
-        final double objectsWeight = settings.getAsDouble(WEIGHT + ".objects", 0.1d);
+        final double bytesWeight = WEIGHT_BYTES_SETTING.get(settings);
+        final double intsWeight = WEIGHT_INT_SETTING.get(settings);
+        final double longsWeight = WEIGHT_LONG_SETTING.get(settings);
+        final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);
 
         final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
         final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
@@ -188,7 +195,7 @@ public class PageCacheRecycler extends AbstractComponent {
         return recycler;
     }
 
-    public static enum Type {
+    public enum Type {
         QUEUE {
             @Override
             <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
@@ -209,9 +216,6 @@ public class PageCacheRecycler extends AbstractComponent {
         };
 
         public static Type parse(String type) {
-            if (Strings.isNullOrEmpty(type)) {
-                return CONCURRENT;
-            }
             try {
                 return Type.valueOf(type.toUpperCase(Locale.ROOT));
             } catch (IllegalArgumentException e) {
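
The hunk above replaces raw string lookups such as settings.getAsDouble(WEIGHT + ".bytes", 1d) with typed Setting constants that carry their key, default, and parser. A minimal sketch of that pattern (the real org.elasticsearch.common.settings.Setting adds scopes, validation, and dynamic updates):

    import java.util.Map;
    import java.util.function.Function;

    final class SettingSketch<T> {
        private final String key;
        private final String defaultValue;
        private final Function<String, T> parser;

        SettingSketch(String key, String defaultValue, Function<String, T> parser) {
            this.key = key;
            this.defaultValue = defaultValue;
            this.parser = parser;
        }

        // Call sites do SETTING.get(settings) instead of parsing strings ad hoc.
        T get(Map<String, String> settings) {
            return parser.apply(settings.getOrDefault(key, defaultValue));
        }

        static final SettingSketch<Double> WEIGHT_BYTES =
                new SettingSketch<>("cache.recycler.page.weight.bytes", "1.0", Double::parseDouble);
    }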

@@ -112,7 +112,7 @@ public class TransportClient extends AbstractClient {
         final Settings.Builder settingsBuilder = settingsBuilder()
                 .put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
                 .put(InternalSettingsPreparer.prepareSettings(settings))
-                .put(NettyTransport.NETWORK_SERVER.getKey(), false)
+                .put(NetworkService.NETWORK_SERVER.getKey(), false)
                 .put(Node.NODE_CLIENT_SETTING.getKey(), true)
                 .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
         return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);

@@ -302,7 +302,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder
     }
 
     public long getCreationDate() {
-        return settings.getAsLong(SETTING_CREATION_DATE, -1l);
+        return settings.getAsLong(SETTING_CREATION_DATE, -1L);
     }
 
     public State getState() {

@@ -106,7 +106,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
     private final Reason reason;
     private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
     private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
-    private volatile long lastComputedLeftDelayNanos = 0l; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
+    private volatile long lastComputedLeftDelayNanos = 0L; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
     private final String message;
     private final Throwable failure;
@@ -217,7 +217,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
             return 0;
         }
         TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
-        return Math.max(0l, delayTimeout.nanos());
+        return Math.max(0L, delayTimeout.nanos());
     }
 
     /**
@@ -236,8 +236,8 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
     public long updateDelay(long nanoTimeNow, Settings settings, Settings indexSettings) {
         long delayTimeoutNanos = getAllocationDelayTimeoutSettingNanos(settings, indexSettings);
         final long newComputedLeftDelayNanos;
-        if (delayTimeoutNanos == 0l) {
-            newComputedLeftDelayNanos = 0l;
+        if (delayTimeoutNanos == 0L) {
+            newComputedLeftDelayNanos = 0L;
         } else {
             assert nanoTimeNow >= unassignedTimeNanos;
             newComputedLeftDelayNanos = Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
@@ -277,7 +277,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
                 }
             }
         }
-        return minDelaySetting == Long.MAX_VALUE ? 0l : minDelaySetting;
+        return minDelaySetting == Long.MAX_VALUE ? 0L : minDelaySetting;
     }
@@ -294,7 +294,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
                 }
             }
         }
-        return nextDelay == Long.MAX_VALUE ? 0l : nextDelay;
+        return nextDelay == Long.MAX_VALUE ? 0L : nextDelay;
     }
 
     public String shortSummary() {

@@ -90,7 +90,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
             type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
         }
-        logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT));
+        logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), type.toString().toLowerCase(Locale.ROOT));
 
         clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
     }

@@ -179,7 +179,7 @@ public class GeoUtils {
         final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
         final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
         final int level = Long.SIZE - Long.numberOfLeadingZeros(part)-1; // (log_2)
-        return (part<=(1l<<level)) ?level :(level+1); // adjust level
+        return (part<=(1L<<level)) ?level :(level+1); // adjust level
     }
 }
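
The expression being fixed here computes a ceiling log2: Long.SIZE - Long.numberOfLeadingZeros(part) - 1 is floor(log2 part), and the ternary bumps it by one unless part is an exact power of two. A quick self-contained check of that arithmetic:

    final class QuadLevelCheck {
        static int level(long part) {
            int level = Long.SIZE - Long.numberOfLeadingZeros(part) - 1; // floor(log2 part)
            return (part <= (1L << level)) ? level : level + 1;          // ceil(log2 part)
        }

        public static void main(String[] args) {
            System.out.println(level(8)); // 3: an exact power of two stays at floor
            System.out.println(level(9)); // 4: anything above rounds up
        }
    }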

@@ -38,7 +38,8 @@ public final class NotSerializableExceptionWrapper extends ElasticsearchException {
     private final RestStatus status;
 
     public NotSerializableExceptionWrapper(Throwable other) {
-        super(other.getMessage(), other.getCause());
+        super(ElasticsearchException.getExceptionName(other) +
+                ": " + other.getMessage(), other.getCause());
         this.name = ElasticsearchException.getExceptionName(other);
         this.status = ExceptionsHelper.status(other);
         setStackTrace(other.getStackTrace());

@@ -21,10 +21,12 @@ package org.elasticsearch.common.lease;
 
 import org.elasticsearch.ElasticsearchException;
 
+import java.io.Closeable;
+
 /**
  * Specialization of {@link AutoCloseable} that may only throw an {@link ElasticsearchException}.
  */
-public interface Releasable extends AutoCloseable {
+public interface Releasable extends Closeable {
 
     @Override
     void close();

@@ -19,38 +19,24 @@
 package org.elasticsearch.common.lease;
 
+import org.apache.lucene.util.IOUtils;
+
+import java.io.IOException;
 import java.util.Arrays;
 
 /** Utility methods to work with {@link Releasable}s. */
 public enum Releasables {
     ;
 
-    private static void rethrow(Throwable t) {
-        if (t instanceof RuntimeException) {
-            throw (RuntimeException) t;
-        }
-        if (t instanceof Error) {
-            throw (Error) t;
-        }
-        throw new RuntimeException(t);
-    }
-
     private static void close(Iterable<? extends Releasable> releasables, boolean ignoreException) {
-        Throwable th = null;
-        for (Releasable releasable : releasables) {
-            if (releasable != null) {
-                try {
-                    releasable.close();
-                } catch (Throwable t) {
-                    if (th == null) {
-                        th = t;
-                    }
-                }
-            }
-        }
-        if (th != null && !ignoreException) {
-            rethrow(th);
-        }
+        try {
+            // this does the right thing with respect to add suppressed and not wrapping errors etc.
+            IOUtils.close(releasables);
+        } catch (Throwable t) {
+            if (ignoreException == false) {
+                IOUtils.reThrowUnchecked(t);
+            }
+        }
     }
 
     /** Release the provided {@link Releasable}s. */
@@ -99,25 +85,11 @@ public enum Releasables {
      * </pre>
      */
     public static Releasable wrap(final Iterable<Releasable> releasables) {
-        return new Releasable() {
-            @Override
-            public void close() {
-                Releasables.close(releasables);
-            }
-        };
+        return () -> close(releasables);
     }
 
     /** @see #wrap(Iterable) */
     public static Releasable wrap(final Releasable... releasables) {
-        return new Releasable() {
-            @Override
-            public void close() {
-                Releasables.close(releasables);
-            }
-        };
+        return () -> close(releasables);
     }
 }
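
Because Releasable now has a single abstract close() (inherited from Closeable but narrowed to throw nothing checked), it is a functional interface: wrap can return a lambda, and callers can release with try-with-resources. An illustrative usage sketch (simplified stand-in, not the Elasticsearch types):

    interface ReleasableSketch extends java.io.Closeable {
        @Override
        void close(); // narrowed: no checked exception

        static ReleasableSketch wrap(ReleasableSketch... releasables) {
            return () -> {                    // single-method interface, so a lambda works
                for (ReleasableSketch r : releasables) {
                    r.close();
                }
            };
        }
    }

    final class ReleasableDemo {
        public static void main(String[] args) {
            ReleasableSketch a = () -> System.out.println("released a");
            ReleasableSketch b = () -> System.out.println("released b");
            try (ReleasableSketch both = ReleasableSketch.wrap(a, b)) {
                System.out.println("working");
            } // both.close() runs here, releasing a then b
        }
    }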

@@ -49,6 +49,7 @@ public class NetworkService extends AbstractComponent {
         s -> s, false, Setting.Scope.CLUSTER);
     public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
         s -> s, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
 
     public static final class TcpSettings {
         public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
@@ -149,7 +150,7 @@ public class NetworkService extends AbstractComponent {
      */
     // TODO: needs to be InetAddress[]
     public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOException {
-        if (publishHosts == null) {
+        if (publishHosts == null || publishHosts.length == 0) {
             if (GLOBAL_NETWORK_PUBLISHHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
                 // if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline
                 publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);

@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterModule;
@ -56,6 +57,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport; import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.index.store.IndexStoreConfig;
@ -67,6 +69,11 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.monitor.fs.FsService;
import org.elasticsearch.monitor.jvm.JvmGcMonitorService;
import org.elasticsearch.monitor.jvm.JvmService;
import org.elasticsearch.monitor.os.OsService;
import org.elasticsearch.monitor.process.ProcessService;
import org.elasticsearch.node.Node; import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.repositories.fs.FsRepository;
@ -79,6 +86,7 @@ import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.transport.netty.NettyTransport; import org.elasticsearch.transport.netty.NettyTransport;
import org.elasticsearch.tribe.TribeService;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
@ -106,9 +114,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
@Override @Override
public boolean hasChanged(Settings current, Settings previous) { public boolean hasChanged(Settings current, Settings previous) {
return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false; return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false;
} }
@Override @Override
public Settings getValue(Settings current, Settings previous) { public Settings getValue(Settings current, Settings previous) {
Settings.Builder builder = Settings.builder(); Settings.Builder builder = Settings.builder();
builder.put(current.filter(loggerPredicate).getAsMap()); builder.put(current.filter(loggerPredicate).getAsMap());
@ -124,7 +132,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
return builder.build(); return builder.build();
} }
@Override @Override
public void apply(Settings value, Settings current, Settings previous) { public void apply(Settings value, Settings current, Settings previous) {
for (String key : value.getAsMap().keySet()) { for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key); assert loggerPredicate.test(key);
@ -135,91 +143,109 @@ public final class ClusterSettings extends AbstractScopedSettings {
} else { } else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key)); ESLoggerFactory.getLogger(component).setLevel(value.get(key));
} }
} }
} }
}; };
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>( public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT, TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME, TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING, FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING, FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING, FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING, MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING, ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
                SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
                DestructiveOperations.REQUIRES_NAME_SETTING,
                DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
                DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
                DiscoverySettings.COMMIT_TIMEOUT_SETTING,
                DiscoverySettings.NO_MASTER_BLOCK_SETTING,
                GatewayService.EXPECTED_DATA_NODES_SETTING,
                GatewayService.EXPECTED_MASTER_NODES_SETTING,
                GatewayService.EXPECTED_NODES_SETTING,
                GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
                GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
                GatewayService.RECOVER_AFTER_NODES_SETTING,
                GatewayService.RECOVER_AFTER_TIME_SETTING,
                NetworkModule.HTTP_ENABLED,
-               NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
-               NettyHttpServerTransport.SETTING_CORS_ENABLED,
-               NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
-               NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
-               NettyHttpServerTransport.SETTING_PIPELINING,
+               HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
+               HttpTransportSettings.SETTING_CORS_ENABLED,
+               HttpTransportSettings.SETTING_CORS_MAX_AGE,
+               HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+               HttpTransportSettings.SETTING_PIPELINING,
+               HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
+               HttpTransportSettings.SETTING_HTTP_PORT,
+               HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
+               HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
+               HttpTransportSettings.SETTING_HTTP_COMPRESSION,
+               HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
+               HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
+               HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
+               HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+               HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
+               HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
+               HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
+               HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
+               HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
                HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
                HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
                HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
                HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
                HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
                InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
                SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
                ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
                TransportService.TRACE_LOG_EXCLUDE_SETTING,
                TransportService.TRACE_LOG_INCLUDE_SETTING,
                TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
                ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
                InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
                HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
                HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
                Transport.TRANSPORT_TCP_COMPRESS,
                TransportSettings.TRANSPORT_PROFILES_SETTING,
+               TransportSettings.HOST,
+               TransportSettings.PUBLISH_HOST,
+               TransportSettings.BIND_HOST,
+               TransportSettings.PUBLISH_PORT,
                TransportSettings.PORT,
                NettyTransport.WORKER_COUNT,
                NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
@@ -235,74 +261,80 @@ public final class ClusterSettings extends AbstractScopedSettings {
                NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
                NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
                NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
-               NettyTransport.NETWORK_SERVER,
+               NetworkService.NETWORK_SERVER,
                NettyTransport.NETTY_BOSS_COUNT,
+               NettyTransport.TCP_NO_DELAY,
+               NettyTransport.TCP_KEEP_ALIVE,
+               NettyTransport.TCP_REUSE_ADDRESS,
+               NettyTransport.TCP_SEND_BUFFER_SIZE,
+               NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
+               NettyTransport.TCP_BLOCKING_SERVER,
                NetworkService.GLOBAL_NETWORK_HOST_SETTING,
                NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
                NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
                NetworkService.TcpSettings.TCP_NO_DELAY,
                NetworkService.TcpSettings.TCP_KEEP_ALIVE,
                NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
                NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
                NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
                NetworkService.TcpSettings.TCP_BLOCKING,
                NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
                NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
                NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
                IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
                IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
                PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
                ScriptService.SCRIPT_CACHE_SIZE_SETTING,
                IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
                IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
                IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
                IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
                IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
                HunspellService.HUNSPELL_LAZY_LOAD,
                HunspellService.HUNSPELL_IGNORE_CASE,
                HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
                IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
                Environment.PATH_CONF_SETTING,
                Environment.PATH_DATA_SETTING,
                Environment.PATH_HOME_SETTING,
                Environment.PATH_LOGS_SETTING,
                Environment.PATH_PLUGINS_SETTING,
                Environment.PATH_REPO_SETTING,
                Environment.PATH_SCRIPTS_SETTING,
                Environment.PATH_SHARED_DATA_SETTING,
                Environment.PIDFILE_SETTING,
                DiscoveryService.DISCOVERY_SEED_SETTING,
                DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
                DiscoveryModule.DISCOVERY_TYPE_SETTING,
                DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
                FaultDetection.PING_RETRIES_SETTING,
                FaultDetection.PING_TIMEOUT_SETTING,
                FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
                FaultDetection.PING_INTERVAL_SETTING,
                FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
                ZenDiscovery.PING_TIMEOUT_SETTING,
                ZenDiscovery.JOIN_TIMEOUT_SETTING,
                ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
                ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
                ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
                ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
                ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
                ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
                ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
                UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
                UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
                SearchService.DEFAULT_KEEPALIVE_SETTING,
                SearchService.KEEPALIVE_INTERVAL_SETTING,
                Node.WRITE_PORTS_FIELD_SETTING,
                Node.NODE_CLIENT_SETTING,
                Node.NODE_DATA_SETTING,
                Node.NODE_MASTER_SETTING,
                Node.NODE_LOCAL_SETTING,
                Node.NODE_MODE_SETTING,
                Node.NODE_INGEST_SETTING,
                URLRepository.ALLOWED_URLS_SETTING,
                URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
                URLRepository.REPOSITORIES_URL_SETTING,
                URLRepository.SUPPORTED_PROTOCOLS_SETTING,
                TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
                AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
@@ -315,7 +347,27 @@ public final class ClusterSettings extends AbstractScopedSettings {
                ThreadContext.DEFAULT_HEADERS_SETTING,
                ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
                ESLoggerFactory.LOG_LEVEL_SETTING,
+               TribeService.BLOCKS_METADATA_SETTING,
+               TribeService.BLOCKS_WRITE_SETTING,
+               TribeService.BLOCKS_WRITE_INDICES_SETTING,
+               TribeService.BLOCKS_READ_INDICES_SETTING,
+               TribeService.BLOCKS_METADATA_INDICES_SETTING,
+               TribeService.ON_CONFLICT_SETTING,
                NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
                NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
-               NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH)));
+               NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
+               OsService.REFRESH_INTERVAL_SETTING,
+               ProcessService.REFRESH_INTERVAL_SETTING,
+               JvmService.REFRESH_INTERVAL_SETTING,
+               FsService.REFRESH_INTERVAL_SETTING,
+               JvmGcMonitorService.ENABLED_SETTING,
+               JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
+               JvmGcMonitorService.GC_SETTING,
+               PageCacheRecycler.LIMIT_HEAP_SETTING,
+               PageCacheRecycler.WEIGHT_BYTES_SETTING,
+               PageCacheRecycler.WEIGHT_INT_SETTING,
+               PageCacheRecycler.WEIGHT_LONG_SETTING,
+               PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
+               PageCacheRecycler.TYPE_SETTING
+               )));
    }
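Every setting registered above becomes part of the cluster-scope validation set, so unknown or malformed keys can be rejected up front and typed accessors replace raw string lookups. A minimal sketch of the pattern; the key my_plugin.enabled and the class are hypothetical, not part of this change:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class MyPluginSettings {
    // boolSetting(key, defaultValue, dynamic, scope) mirrors the registrations above
    public static final Setting<Boolean> MY_PLUGIN_ENABLED =
            Setting.boolSetting("my_plugin.enabled", false, false, Setting.Scope.CLUSTER);

    public static boolean isEnabled(Settings settings) {
        // typed accessor: the Setting owns key, type and default in one place
        return MY_PLUGIN_ENABLED.get(settings);
    }
}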

@@ -939,6 +939,14 @@ public final class Settings implements ToXContent {
            return this;
        }

+       /**
+        * Sets the setting with the provided setting key and an array of values.
+        *
+        * @param setting The setting key
+        * @param values  The values
+        * @return The builder
+        */
        /**
         * Sets the setting with the provided setting key and an array of values.
         *
@@ -947,6 +955,17 @@ public final class Settings implements ToXContent {
         * @return The builder
         */
        public Builder putArray(String setting, String... values) {
+           return putArray(setting, Arrays.asList(values));
+       }
+
+       /**
+        * Sets the setting with the provided setting key and a list of values.
+        *
+        * @param setting The setting key
+        * @param values  The values
+        * @return The builder
+        */
+       public Builder putArray(String setting, List<String> values) {
            remove(setting);
            int counter = 0;
            while (true) {
@@ -955,8 +974,8 @@ public final class Settings implements ToXContent {
                    break;
                }
            }
-           for (int i = 0; i < values.length; i++) {
-               put(setting + "." + i, values[i]);
+           for (int i = 0; i < values.size(); i++) {
+               put(setting + "." + i, values.get(i));
            }
            return this;
        }
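For reference, the array is stored as numbered sub-keys (key.0, key.1, ...), which is why the overloads above first remove(setting) and then re-put each element. A usage sketch, assuming the 2.x-era Settings.settingsBuilder() factory used elsewhere in this codebase:

Settings settings = Settings.settingsBuilder()
        .putArray("discovery.zen.ping.unicast.hosts", "10.0.0.1", "10.0.0.2")
        .build();
// stored internally as discovery.zen.ping.unicast.hosts.0 = 10.0.0.1
//                  and discovery.zen.ping.unicast.hosts.1 = 10.0.0.2
String[] hosts = settings.getAsArray("discovery.zen.ping.unicast.hosts");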

@@ -35,6 +35,10 @@ public class PortsRange {
        this.portRange = portRange;
    }

+   public String getPortRangeString() {
+       return portRange;
+   }
+
    public int[] ports() throws NumberFormatException {
        final IntArrayList ports = new IntArrayList();
        iterate(new PortCallback() {
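The new getPortRangeString() accessor exposes the raw range string, while iterate(...) walks candidate ports until the callback reports success. A sketch of typical use; tryBind is a hypothetical helper, not part of this change:

PortsRange range = new PortsRange("9200-9300");
boolean bound = range.iterate(new PortsRange.PortCallback() {
    @Override
    public boolean onPortNumber(int portNumber) {
        // return true once a port could be used; returning false keeps iterating
        return tryBind(portNumber);
    }
});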

@@ -41,7 +41,7 @@ import java.util.concurrent.TimeUnit;
public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {

    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
-   public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0l, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
+   public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);

    private static class InitialStateListener implements InitialStateDiscoveryListener {
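The only change here is the long-literal suffix: 0l becomes 0L because the lowercase suffix is easily misread as the digit one. The same cleanup repeats in several files below:

long a = 0l;  // easily read as "01"
long b = 0L;  // unambiguous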

@@ -19,7 +19,6 @@
package org.elasticsearch.discovery.local;

-import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
@@ -29,7 +28,6 @@ import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.IncompatibleClusterStateVersionException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -44,12 +42,10 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.InitialStateDiscoveryListener;
import org.elasticsearch.node.service.NodeService;
-import org.elasticsearch.transport.TransportService;

import java.util.HashSet;
import java.util.Queue;
@@ -67,17 +63,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
    private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0];

-   private final TransportService transportService;
    private final ClusterService clusterService;
-   private final DiscoveryNodeService discoveryNodeService;
    private RoutingService routingService;
    private final ClusterName clusterName;
-   private final Version version;
    private final DiscoverySettings discoverySettings;

-   private DiscoveryNode localNode;
-
    private volatile boolean master = false;
    private final AtomicBoolean initialStateSent = new AtomicBoolean();
@@ -89,14 +80,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
    private volatile ClusterState lastProcessedClusterState;

    @Inject
-   public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService,
-                         DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) {
+   public LocalDiscovery(Settings settings, ClusterName clusterName, ClusterService clusterService,
+                         DiscoverySettings discoverySettings) {
        super(settings);
        this.clusterName = clusterName;
        this.clusterService = clusterService;
-       this.transportService = transportService;
-       this.discoveryNodeService = discoveryNodeService;
-       this.version = version;
        this.discoverySettings = discoverySettings;
    }
@@ -119,8 +107,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                clusterGroups.put(clusterName, clusterGroup);
            }
            logger.debug("Connected to cluster [{}]", clusterName);
-           this.localNode = new DiscoveryNode(settings.get("name"), DiscoveryService.generateNodeId(settings), transportService.boundAddress().publishAddress(),
-                   discoveryNodeService.buildAttributes(), version);

            clusterGroup.members().add(this);
@@ -147,7 +133,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                    public ClusterState execute(ClusterState currentState) {
                        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
                        for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
-                           nodesBuilder.put(discovery.localNode);
+                           nodesBuilder.put(discovery.localNode());
                        }
                        nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
                        // remove the NO_MASTER block in this case
@@ -166,30 +152,9 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                    }
                });
            } else if (firstMaster != null) {
-               // update as fast as we can the local node state with the new metadata (so we create indices for example)
-               final ClusterState masterState = firstMaster.clusterService.state();
-               clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
-                   @Override
-                   public boolean runOnlyOnMaster() {
-                       return false;
-                   }
-
-                   @Override
-                   public ClusterState execute(ClusterState currentState) {
-                       // make sure we have the local node id set, we might need it as a result of the new metadata
-                       DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id());
-                       return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build();
-                   }
-
-                   @Override
-                   public void onFailure(String source, Throwable t) {
-                       logger.error("unexpected failure during [{}]", t, source);
-                   }
-               });
-
                // tell the master to send the fact that we are here
                final LocalDiscovery master = firstMaster;
-               firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ClusterStateUpdateTask() {
+               firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode() + "])", new ClusterStateUpdateTask() {
                    @Override
                    public boolean runOnlyOnMaster() {
                        return false;
@@ -199,7 +164,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                    public ClusterState execute(ClusterState currentState) {
                        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
                        for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
-                           nodesBuilder.put(discovery.localNode);
+                           nodesBuilder.put(discovery.localNode());
                        }
                        nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
                        return ClusterState.builder(currentState).nodes(nodesBuilder).build();
@@ -254,7 +219,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
        final Set<String> newMembers = new HashSet<>();
        for (LocalDiscovery discovery : clusterGroup.members()) {
-           newMembers.add(discovery.localNode.id());
+           newMembers.add(discovery.localNode().id());
        }
        final LocalDiscovery master = firstMaster;
@@ -266,7 +231,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
            @Override
            public ClusterState execute(ClusterState currentState) {
-               DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id());
+               DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().id());
                DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
                if (delta.added()) {
                    logger.warn("No new nodes should be created when a new discovery view is accepted");
@@ -293,7 +258,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
    @Override
    public DiscoveryNode localNode() {
-       return localNode;
+       return clusterService.localNode();
    }

    @Override
@@ -308,7 +273,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
    @Override
    public String nodeDescription() {
-       return clusterName.value() + "/" + localNode.id();
+       return clusterName.value() + "/" + localNode().id();
    }

    @Override
@@ -323,7 +288,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
            if (localDiscovery.master) {
                continue;
            }
-           nodesToPublishTo.add(localDiscovery.localNode);
+           nodesToPublishTo.add(localDiscovery.localNode());
        }
        publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener));
    }
@@ -359,7 +324,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
        synchronized (this) {
            // we do the marshaling intentionally, to check it works well...
            // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
-           if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) {
+           if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
                // both conditions are true - which means we can try sending cluster state as diffs
                if (clusterStateDiffBytes == null) {
                    Diff diff = clusterState.diff(clusterChangedEvent.previousState());
@@ -369,7 +334,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                }
                try {
                    newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
-                   logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode.getName());
+                   logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
                } catch (IncompatibleClusterStateVersionException ex) {
                    logger.warn("incompatible cluster state version [{}] - resending complete cluster state", ex, clusterState.version());
                }
@@ -378,7 +343,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                if (clusterStateBytes == null) {
                    clusterStateBytes = Builder.toBytes(clusterState);
                }
-               newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode);
+               newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode());
            }
            discovery.lastProcessedClusterState = newNodeSpecificClusterState;
        }
@@ -423,17 +388,17 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                    @Override
                    public void onFailure(String source, Throwable t) {
                        logger.error("unexpected failure during [{}]", t, source);
-                       publishResponseHandler.onFailure(discovery.localNode, t);
+                       publishResponseHandler.onFailure(discovery.localNode(), t);
                    }

                    @Override
                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                        sendInitialStateEventIfNeeded();
-                       publishResponseHandler.onResponse(discovery.localNode);
+                       publishResponseHandler.onResponse(discovery.localNode());
                    }
                });
            } else {
-               publishResponseHandler.onResponse(discovery.localNode);
+               publishResponseHandler.onResponse(discovery.localNode());
            }
        }
    }

@@ -60,7 +60,7 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
    }

    private long timeCreated(Settings settings) {
-       return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1l);
+       return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L);
    }

    protected abstract Settings getIndexSettings(String index);

@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http;
+
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Scope;
+import org.elasticsearch.common.transport.PortsRange;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+public final class HttpTransportSettings {
+
+    public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
+    public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER);
+    public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
+    public static final Setting<String> SETTING_CORS_ALLOW_METHODS = new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER);
+    public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER);
+    public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
+    public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
+    public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
+    public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
+    public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
+    public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
+    public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
+    public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER);
+    // don't reset cookies by default, since I don't think we really need to
+    // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
+    public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER);
+
+    private HttpTransportSettings() {
+    }
+}
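With the constants centralized, consumers read typed values and the defaults live in the Setting itself rather than at each call site. A sketch of reading a few of them; the override values are hypothetical, and the same Settings.settingsBuilder() factory as above is assumed:

Settings settings = Settings.settingsBuilder()
        .put("http.port", "9400-9500")
        .put("http.cors.enabled", true)
        .build();

PortsRange port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
boolean corsEnabled = HttpTransportSettings.SETTING_CORS_ENABLED.get(settings);
// falls back to the declared default of 100mb when not overridden
ByteSizeValue maxContent = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);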

@@ -20,6 +20,7 @@
package org.elasticsearch.http.netty;

import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelHandler;
@@ -46,7 +47,8 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
    public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) {
        this.serverTransport = serverTransport;
-       this.corsPattern = RestUtils.checkCorsSettingForRegex(serverTransport.settings().get(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN));
+       this.corsPattern = RestUtils
+               .checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings()));
        this.httpPipeliningEnabled = serverTransport.pipelining;
        this.detailedErrorsEnabled = detailedErrorsEnabled;
        this.threadContext = threadContext;

@@ -49,12 +49,12 @@ import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS;
-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_HEADERS;
-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_METHODS;
-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN;
-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED;
-import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_MAX_AGE;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
@@ -117,7 +117,7 @@ public class NettyHttpChannel extends HttpChannel {
        String originHeader = request.header(ORIGIN);
        if (!Strings.isNullOrEmpty(originHeader)) {
            if (corsPattern == null) {
-               String allowedOrigins = transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, null);
+               String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings());
                if (!Strings.isNullOrEmpty(allowedOrigins)) {
                    resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins);
                }
@@ -128,8 +128,8 @@ public class NettyHttpChannel extends HttpChannel {
            if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
                // Allow Ajax requests based on the CORS "preflight" request
                resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
-               resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE"));
-               resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length"));
+               resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings()));
+               resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings()));
            }

            if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {

@@ -26,8 +26,6 @@ import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -46,6 +44,7 @@ import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
+import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
@@ -75,7 +74,6 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
@@ -93,22 +91,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
        NettyUtils.setup();
    }

-   public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
-   public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin";
-   public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
-   public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods";
-   public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers";
-   public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
-   public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
-   public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events";
-   public static final String SETTING_HTTP_COMPRESSION = "http.compression";
-   public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level";
-   public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
-   public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000;
-   public static final String DEFAULT_PORT_RANGE = "9200-9300";
-
    protected final NetworkService networkService;
    protected final BigArrays bigArrays;
@@ -131,7 +113,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
    protected final boolean resetCookies;

-   protected final String port;
+   protected final PortsRange port;

    protected final String bindHosts[];
@@ -176,28 +158,25 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
        if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
            System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
        }

-       ByteSizeValue maxContentLength = settings.getAsBytesSize("http.netty.max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
-       this.maxChunkSize = settings.getAsBytesSize("http.netty.max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
-       this.maxHeaderSize = settings.getAsBytesSize("http.netty.max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
-       this.maxInitialLineLength = settings.getAsBytesSize("http.netty.max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
-       // don't reset cookies by default, since I don't think we really need to
-       // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
-       this.resetCookies = settings.getAsBoolean("http.netty.reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
+       ByteSizeValue maxContentLength = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
+       this.maxChunkSize = HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
+       this.maxHeaderSize = HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
+       this.maxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+       this.resetCookies = HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.get(settings);
        this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
        this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
        this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
        this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
-       this.port = settings.get("http.netty.port", settings.get("http.port", DEFAULT_PORT_RANGE));
+       this.port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
        this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
        this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
-       this.publishPort = settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
+       this.publishPort = HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.get(settings);
        this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
        this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
        this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
        this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
        this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
-       this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
+       this.detailedErrorsEnabled = HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

        long defaultReceiverPredictor = 512 * 1024;
        if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@@ -215,10 +194,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
            receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
        }

-       this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
-       this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
-       this.pipelining = SETTING_PIPELINING.get(settings);
-       this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);
+       this.compression = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings);
+       this.compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
+       this.pipelining = HttpTransportSettings.SETTING_PIPELINING.get(settings);
+       this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);

        // validate max content length
        if (maxContentLength.bytes() > Integer.MAX_VALUE) {
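The before/after pattern in this constructor is the core of the settings refactoring: a raw key lookup with an inline default gives way to a typed Setting that owns its default. Side by side, with both lines taken verbatim from the hunk above:

// before: default value duplicated at every call site
this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);

// after: the Setting carries key, type and default (10000) in one place
this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);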
@@ -312,10 +291,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
    }

    private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
-       PortsRange portsRange = new PortsRange(port);
        final AtomicReference<Exception> lastException = new AtomicReference<>();
        final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
-       boolean success = portsRange.iterate(new PortsRange.PortCallback() {
+       boolean success = port.iterate(new PortsRange.PortCallback() {
            @Override
            public boolean onPortNumber(int portNumber) {
                try {

@@ -456,7 +456,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            if (shardId != null) {
                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                if (shard != null) {
-                   long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
+                   long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
                    shard.shardBitsetFilterCache().onCached(ramBytesUsed);
                }
            }
@@ -467,7 +467,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            if (shardId != null) {
                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                if (shard != null) {
-                   long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
+                   long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
                    shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
                }
            }

@@ -121,7 +121,7 @@ public interface ScriptDocValues<T> extends List<T> {
        public long getValue() {
            int numValues = values.count();
            if (numValues == 0) {
-               return 0l;
+               return 0L;
            }
            return values.valueAt(0);
        }

@@ -81,7 +81,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
    }

    public static final String DEFAULT_MAPPING = "_default_";
-   public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50l, 0, true, Setting.Scope.INDEX);
+   public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX);
    public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
    public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX);
    private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(

@@ -66,7 +66,7 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
public class IpFieldMapper extends NumberFieldMapper {

    public static final String CONTENT_TYPE = "ip";
-   public static final long MAX_IP = 4294967296l;
+   public static final long MAX_IP = 4294967296L;

    public static String longToIp(long longIp) {
        int octet3 = (int) ((longIp >> 24) % 256);

@@ -930,7 +930,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                    return new Tuple<>(indexInput.readStringStringMap(), lastFound);
                }
            }
-           return new Tuple<>(new HashMap<>(), -1l);
+           return new Tuple<>(new HashMap<>(), -1L);
        }
    }

@@ -31,6 +31,7 @@ import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
@@ -52,7 +53,7 @@ import java.util.function.ToLongBiFunction;
/**
 */
-public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable> {
+public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable {

    public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
    public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
@@ -84,6 +85,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
                new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
    }

+   @Override
    public void close() {
        cache.invalidateAll();
        this.closed = true;
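Implementing Releasable lets the cache participate in the standard resource-cleanup utilities. A minimal sketch, assuming the Releasables.close(...) helper from org.elasticsearch.common.lease as used elsewhere in this codebase:

static void shutdown(IndicesFieldDataCache fieldDataCache) {
    // Releasables.close delegates to close(), which invalidates all cached entries (see above)
    Releasables.close(fieldDataCache);
}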

@ -252,60 +252,58 @@ public class RecoverySourceHandler {
        final AtomicLong bytesSinceLastPause = new AtomicLong();
        final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes);
        sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
-       cancellableThreads.execute(() -> {
-           // Send the CLEAN_FILES request, which takes all of the files that
-           // were transferred and renames them from their temporary file
-           // names to the actual file names. It also writes checksums for
-           // the files after they have been renamed.
-           //
-           // Once the files have been renamed, any other files that are not
-           // related to this recovery (out of date segments, for example)
-           // are deleted
-           try {
-               transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
-                   new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()),
-                   TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
-                   EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
-           } catch (RemoteTransportException remoteException) {
-               final IOException corruptIndexException;
-               // we realized that after the index was copied and we wanted to finalize the recovery
-               // the index was corrupted:
-               // - maybe due to a broken segments file on an empty index (transferred with no checksum)
-               // - maybe due to old segments without checksums or length only checks
-               if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
-                   try {
-                       final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
-                       StoreFileMetaData[] metadata =
-                           StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
-                       ArrayUtil.timSort(metadata, new Comparator<StoreFileMetaData>() {
-                           @Override
-                           public int compare(StoreFileMetaData o1, StoreFileMetaData o2) {
-                               return Long.compare(o1.length(), o2.length()); // check small files first
-                           }
-                       });
-                       for (StoreFileMetaData md : metadata) {
-                           logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
-                           if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail!
-                               shard.failShard("recovery", corruptIndexException);
-                               logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
-                               throw corruptIndexException;
-                           }
-                       }
-                   } catch (IOException ex) {
-                       remoteException.addSuppressed(ex);
-                       throw remoteException;
-                   }
-                   // corruption has happened on the way to replica
-                   RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
-                   exception.addSuppressed(remoteException);
-                   logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
-                       corruptIndexException, shard.shardId(), request.targetNode());
-                   throw exception;
-               } else {
-                   throw remoteException;
-               }
-           }
-       });
+       // Send the CLEAN_FILES request, which takes all of the files that
+       // were transferred and renames them from their temporary file
+       // names to the actual file names. It also writes checksums for
+       // the files after they have been renamed.
+       //
+       // Once the files have been renamed, any other files that are not
+       // related to this recovery (out of date segments, for example)
+       // are deleted
+       try {
+           cancellableThreads.execute(() -> {
+               transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
+                   new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()),
+                   TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
+                   EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+           });
+       } catch (RemoteTransportException remoteException) {
+           final IOException corruptIndexException;
+           // we realized that after the index was copied and we wanted to finalize the recovery
+           // the index was corrupted:
+           // - maybe due to a broken segments file on an empty index (transferred with no checksum)
+           // - maybe due to old segments without checksums or length only checks
+           if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
+               try {
+                   final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
+                   StoreFileMetaData[] metadata =
+                       StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
+                   ArrayUtil.timSort(metadata, (o1, o2) -> {
+                       return Long.compare(o1.length(), o2.length()); // check small files first
+                   });
+                   for (StoreFileMetaData md : metadata) {
+                       cancellableThreads.checkForCancel();
+                       logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
+                       if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail!
+                           shard.failShard("recovery", corruptIndexException);
+                           logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
+                           throw corruptIndexException;
+                       }
+                   }
+               } catch (IOException ex) {
+                   remoteException.addSuppressed(ex);
+                   throw remoteException;
+               }
+               // corruption has happened on the way to replica
+               RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
+               exception.addSuppressed(remoteException);
+               logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
+                   corruptIndexException, shard.shardId(), request.targetNode());
+               throw exception;
+           } else {
+               throw remoteException;
+           }
+       }
        prepareTargetForTranslog(translogView.totalOperations());
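The reshuffle above isolates only the interruptible blocking call inside CancellableThreads.execute and moves failure handling outside of it; as a minimal sketch of that pattern (doBlockingCall and handleFailure are hypothetical placeholders):

    try {
        cancellableThreads.execute(() -> doBlockingCall()); // only the blocking call can be cancelled
    } catch (RemoteTransportException e) {
        handleFailure(e); // handling runs outside the cancellable scope
    }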

@ -36,8 +36,10 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.core.Pipeline;
+import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.TemplateService;
+import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.script.ScriptService;

import java.io.Closeable;
@ -101,7 +103,7 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
        Map<String, Pipeline> pipelines = new HashMap<>();
        for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
            try {
-               pipelines.put(pipeline.getId(), constructPipeline(pipeline.getId(), pipeline.getConfigAsMap()));
+               pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
@ -148,16 +150,14 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
    /**
     * Stores the specified pipeline definition in the request.
-    *
-    * @throws IllegalArgumentException If the pipeline holds incorrect configuration
     */
-   public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) throws IllegalArgumentException {
-       try {
-           // validates the pipeline and processor configuration before submitting a cluster update task:
-           Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
-           constructPipeline(request.getId(), pipelineConfig);
-       } catch (Exception e) {
-           throw new IllegalArgumentException("Invalid pipeline configuration", e);
+   public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
+       // validates the pipeline and processor configuration before submitting a cluster update task:
+       Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
+       WritePipelineResponse response = validatePipelineResponse(request.getId(), pipelineConfig);
+       if (response != null) {
+           listener.onResponse(response);
+           return;
        }
        clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask<WritePipelineResponse>(request, listener) {
@ -235,8 +235,15 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
        return result;
    }

-   private Pipeline constructPipeline(String id, Map<String, Object> config) throws Exception {
-       return factory.create(id, config, processorFactoryRegistry);
+   WritePipelineResponse validatePipelineResponse(String id, Map<String, Object> config) {
+       try {
+           factory.create(id, config, processorFactoryRegistry);
+           return null;
+       } catch (ConfigurationPropertyException e) {
+           return new WritePipelineResponse(new PipelineFactoryError(e));
+       } catch (Exception e) {
+           return new WritePipelineResponse(new PipelineFactoryError(e.getMessage()));
+       }
    }
}

@ -31,7 +31,7 @@ public abstract class AbstractProcessorFactory<P extends Processor> implements P
    @Override
    public P create(Map<String, Object> config) throws Exception {
-       String tag = ConfigurationUtils.readOptionalStringProperty(config, TAG_KEY);
+       String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY);
        return doCreate(tag, config);
    }

@ -19,6 +19,8 @@
package org.elasticsearch.ingest.core;

+import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
+
import java.util.List;
import java.util.Map;
@ -30,133 +32,133 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified optional property from the specified configuration map.
     *
-    * If the property value isn't of type string a {@link IllegalArgumentException} is thrown.
+    * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
     */
-   public static String readOptionalStringProperty(Map<String, Object> configuration, String propertyName) {
+   public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
-       return readString(propertyName, value);
+       return readString(processorType, processorTag, propertyName, value);
    }

    /**
     * Returns and removes the specified property from the specified configuration map.
     *
-    * If the property value isn't of type string an {@link IllegalArgumentException} is thrown.
-    * If the property is missing an {@link IllegalArgumentException} is thrown
+    * If the property value isn't of type string an {@link ConfigurationPropertyException} is thrown.
+    * If the property is missing an {@link ConfigurationPropertyException} is thrown
     */
-   public static String readStringProperty(Map<String, Object> configuration, String propertyName) {
-       return readStringProperty(configuration, propertyName, null);
+   public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+       return readStringProperty(processorType, processorTag, configuration, propertyName, null);
    }

    /**
     * Returns and removes the specified property from the specified configuration map.
     *
-    * If the property value isn't of type string a {@link IllegalArgumentException} is thrown.
-    * If the property is missing and no default value has been specified a {@link IllegalArgumentException} is thrown
+    * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
+    * If the property is missing and no default value has been specified a {@link ConfigurationPropertyException} is thrown
     */
-   public static String readStringProperty(Map<String, Object> configuration, String propertyName, String defaultValue) {
+   public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) {
        Object value = configuration.remove(propertyName);
        if (value == null && defaultValue != null) {
            return defaultValue;
        } else if (value == null) {
-           throw new IllegalArgumentException("required property [" + propertyName + "] is missing");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
        }
-       return readString(propertyName, value);
+       return readString(processorType, processorTag, propertyName, value);
    }

-   private static String readString(String propertyName, Object value) {
+   private static String readString(String processorType, String processorTag, String propertyName, Object value) {
        if (value == null) {
            return null;
        }
        if (value instanceof String) {
            return (String) value;
        }
-       throw new IllegalArgumentException("property [" + propertyName + "] isn't a string, but of type [" + value.getClass().getName() + "]");
+       throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
    }

    /**
     * Returns and removes the specified property of type list from the specified configuration map.
     *
-    * If the property value isn't of type list an {@link IllegalArgumentException} is thrown.
+    * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
     */
-   public static <T> List<T> readOptionalList(Map<String, Object> configuration, String propertyName) {
+   public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            return null;
        }
-       return readList(propertyName, value);
+       return readList(processorType, processorTag, propertyName, value);
    }

    /**
     * Returns and removes the specified property of type list from the specified configuration map.
     *
-    * If the property value isn't of type list an {@link IllegalArgumentException} is thrown.
-    * If the property is missing an {@link IllegalArgumentException} is thrown
+    * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
+    * If the property is missing an {@link ConfigurationPropertyException} is thrown
     */
-   public static <T> List<T> readList(Map<String, Object> configuration, String propertyName) {
+   public static <T> List<T> readList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
-           throw new IllegalArgumentException("required property [" + propertyName + "] is missing");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
        }
-       return readList(propertyName, value);
+       return readList(processorType, processorTag, propertyName, value);
    }

-   private static <T> List<T> readList(String propertyName, Object value) {
+   private static <T> List<T> readList(String processorType, String processorTag, String propertyName, Object value) {
        if (value instanceof List) {
            @SuppressWarnings("unchecked")
            List<T> stringList = (List<T>) value;
            return stringList;
        } else {
-           throw new IllegalArgumentException("property [" + propertyName + "] isn't a list, but of type [" + value.getClass().getName() + "]");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
        }
    }

    /**
     * Returns and removes the specified property of type map from the specified configuration map.
     *
-    * If the property value isn't of type map an {@link IllegalArgumentException} is thrown.
-    * If the property is missing an {@link IllegalArgumentException} is thrown
+    * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
+    * If the property is missing an {@link ConfigurationPropertyException} is thrown
     */
-   public static <T> Map<String, T> readMap(Map<String, Object> configuration, String propertyName) {
+   public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
-           throw new IllegalArgumentException("required property [" + propertyName + "] is missing");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
        }
-       return readMap(propertyName, value);
+       return readMap(processorType, processorTag, propertyName, value);
    }

    /**
     * Returns and removes the specified property of type map from the specified configuration map.
     *
-    * If the property value isn't of type map an {@link IllegalArgumentException} is thrown.
+    * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
     */
-   public static <T> Map<String, T> readOptionalMap(Map<String, Object> configuration, String propertyName) {
+   public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            return null;
        }
-       return readMap(propertyName, value);
+       return readMap(processorType, processorTag, propertyName, value);
    }

-   private static <T> Map<String, T> readMap(String propertyName, Object value) {
+   private static <T> Map<String, T> readMap(String processorType, String processorTag, String propertyName, Object value) {
        if (value instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, T> map = (Map<String, T>) value;
            return map;
        } else {
-           throw new IllegalArgumentException("property [" + propertyName + "] isn't a map, but of type [" + value.getClass().getName() + "]");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
        }
    }

    /**
     * Returns and removes the specified property as an {@link Object} from the specified configuration map.
     */
-   public static Object readObject(Map<String, Object> configuration, String propertyName) {
+   public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
-           throw new IllegalArgumentException("required property [" + propertyName + "] is missing");
+           throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
        }
        return value;
    }
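To make the new signatures concrete, a hypothetical factory sketch (MyProcessor and its TYPE are illustrative, not part of this commit):

    public class MyProcessorFactory extends AbstractProcessorFactory<MyProcessor> {
        static final String TYPE = "my_processor"; // hypothetical processor type

        @Override
        public MyProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
            // passing TYPE and processorTag means a bad "field" value surfaces as a
            // ConfigurationPropertyException carrying type, tag and property name
            String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
            return new MyProcessor(processorTag, field);
        }
    }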

@ -19,6 +19,8 @@
package org.elasticsearch.ingest.core;

+import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@ -82,19 +84,20 @@ public final class Pipeline {
    public final static class Factory {

-       public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws Exception {
-           String description = ConfigurationUtils.readOptionalStringProperty(config, DESCRIPTION_KEY);
-           List<Processor> processors = readProcessors(PROCESSORS_KEY, processorRegistry, config);
-           List<Processor> onFailureProcessors = readProcessors(ON_FAILURE_KEY, processorRegistry, config);
+       public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
+           String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
+           List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
+           List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry);
+           List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
+           List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
            if (config.isEmpty() == false) {
-               throw new IllegalArgumentException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
+               throw new ConfigurationPropertyException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
            }
            CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors));
            return new Pipeline(id, description, compoundProcessor);
        }

-       private List<Processor> readProcessors(String fieldName, Map<String, Processor.Factory> processorRegistry, Map<String, Object> config) throws Exception {
-           List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readOptionalList(config, fieldName);
+       private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
            List<Processor> processors = new ArrayList<>();
            if (processorConfigs != null) {
                for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
@ -107,20 +110,28 @@ public final class Pipeline {
            return processors;
        }

-       private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws Exception {
+       private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws ConfigurationPropertyException {
            Processor.Factory factory = processorRegistry.get(type);
            if (factory != null) {
-               List<Processor> onFailureProcessors = readProcessors(ON_FAILURE_KEY, processorRegistry, config);
-               Processor processor = factory.create(config);
-               if (config.isEmpty() == false) {
-                   throw new IllegalArgumentException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
+               List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
+               List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
+               Processor processor;
+               try {
+                   processor = factory.create(config);
+               } catch (ConfigurationPropertyException e) {
+                   throw e;
+               } catch (Exception e) {
+                   throw new ConfigurationPropertyException(e.getMessage());
+               }
+               if (!config.isEmpty()) {
+                   throw new ConfigurationPropertyException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
                }
                if (onFailureProcessors.isEmpty()) {
                    return processor;
                }
                return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
            }
-           throw new IllegalArgumentException("No processor type exists with name [" + type + "]");
+           throw new ConfigurationPropertyException("No processor type exists with name [" + type + "]");
        }
    }
}
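For orientation, a sketch of the config shape Factory.create consumes, assuming the conventional key values "description", "processors" and "on_failure" behind DESCRIPTION_KEY, PROCESSORS_KEY and ON_FAILURE_KEY:

    Map<String, Object> renameConfig = new HashMap<>();
    renameConfig.put("field", "src");
    renameConfig.put("to", "dest");
    Map<String, Object> config = new HashMap<>();
    config.put("description", "rename src to dest");
    config.put("processors", Collections.singletonList(Collections.singletonMap("rename", renameConfig)));
    // processorRegistry is assumed in scope and must contain a factory under "rename"
    Pipeline pipeline = new Pipeline.Factory().create("my-pipeline", config, processorRegistry);

Any unconsumed key left in config or renameConfig now fails the whole create call with a ConfigurationPropertyException rather than a bare IllegalArgumentException.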

@ -0,0 +1,96 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import java.io.IOException;
public class PipelineFactoryError implements Streamable, ToXContent {
private String reason;
private String processorType;
private String processorTag;
private String processorPropertyName;
public PipelineFactoryError() {
}
public PipelineFactoryError(ConfigurationPropertyException e) {
this.reason = e.getMessage();
this.processorType = e.getProcessorType();
this.processorTag = e.getProcessorTag();
this.processorPropertyName = e.getPropertyName();
}
public PipelineFactoryError(String reason) {
this.reason = "Constructing Pipeline failed:" + reason;
}
public String getReason() {
return reason;
}
public String getProcessorTag() {
return processorTag;
}
public String getProcessorPropertyName() {
return processorPropertyName;
}
public String getProcessorType() {
return processorType;
}
@Override
public void readFrom(StreamInput in) throws IOException {
reason = in.readString();
processorType = in.readOptionalString();
processorTag = in.readOptionalString();
processorPropertyName = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(reason);
out.writeOptionalString(processorType);
out.writeOptionalString(processorTag);
out.writeOptionalString(processorPropertyName);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("error");
builder.field("type", processorType);
builder.field("tag", processorTag);
builder.field("reason", reason);
builder.field("property_name", processorPropertyName);
builder.endObject();
return builder;
}
}
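Rendered through toXContent, a factory error serializes as a nested "error" object; with illustrative values it looks roughly like:

    {"error":{"type":"rename","tag":"tag-1","reason":"[field] required property is missing","property_name":"field"}}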

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
public class PipelineFactoryResult {
private final Pipeline pipeline;
private final PipelineFactoryError error;
public PipelineFactoryResult(Pipeline pipeline) {
this.pipeline = pipeline;
this.error = null;
}
public PipelineFactoryResult(PipelineFactoryError error) {
this.error = error;
this.pipeline = null;
}
public Pipeline getPipeline() {
return pipeline;
}
public PipelineFactoryError getError() {
return error;
}
}
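A holder like this is meant to be branched on rather than unwrapped blindly; a minimal hypothetical helper (unwrap is not part of the commit):

    // Hypothetical convenience: fail fast when the factory reported an error.
    static Pipeline unwrap(PipelineFactoryResult result) {
        if (result.getError() != null) {
            throw new IllegalStateException(result.getError().getReason());
        }
        return result.getPipeline();
    }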

@ -20,6 +20,8 @@
package org.elasticsearch.ingest.core;

+import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
+
import java.util.Map;

/**

@ -55,10 +55,15 @@ public abstract class AbstractStringProcessor extends AbstractProcessor {
    protected abstract String process(String value);

    public static abstract class Factory<T extends AbstractStringProcessor> extends AbstractProcessorFactory<T> {
+       protected final String processorType;
+
+       protected Factory(String processorType) {
+           this.processorType = processorType;
+       }
+
        @Override
        public T doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
+           String field = ConfigurationUtils.readStringProperty(processorType, processorTag, config, "field");
            return newProcessor(processorTag, field);
        }

@ -74,8 +74,8 @@ public class AppendProcessor extends AbstractProcessor {
        @Override
        public AppendProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           Object value = ConfigurationUtils.readObject(config, "value");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value");
            return new AppendProcessor(processorTag, templateService.compile(field), ValueSource.wrap(value, templateService));
        }
    }

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
/**
* Exception class thrown by processor factories.
*/
public class ConfigurationPropertyException extends RuntimeException {
private String processorType;
private String processorTag;
private String propertyName;
public ConfigurationPropertyException(String processorType, String processorTag, String propertyName, String message) {
super("[" + propertyName + "] " + message);
this.processorTag = processorTag;
this.processorType = processorType;
this.propertyName = propertyName;
}
public ConfigurationPropertyException(String errorMessage) {
super(errorMessage);
}
public String getPropertyName() {
return propertyName;
}
public String getProcessorType() {
return processorType;
}
public String getProcessorTag() {
return processorTag;
}
}
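For concreteness, a factory that detects a missing property can throw (values illustrative):

    // getMessage() becomes "[field] required property is missing"
    throw new ConfigurationPropertyException("rename", "tag-1", "field", "required property is missing");

The three extra arguments are what PipelineFactoryError later surfaces as type, tag and property_name.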

@ -137,8 +137,8 @@ public class ConvertProcessor extends AbstractProcessor {
    public static class Factory extends AbstractProcessorFactory<ConvertProcessor> {
        @Override
        public ConvertProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           Type convertType = Type.fromString(ConfigurationUtils.readStringProperty(config, "type"));
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           Type convertType = Type.fromString(ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type"));
            return new ConvertProcessor(processorTag, field, convertType);
        }
    }

@ -112,11 +112,11 @@ public final class DateProcessor extends AbstractProcessor {
        @SuppressWarnings("unchecked")
        public DateProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String matchField = ConfigurationUtils.readStringProperty(config, "match_field");
-           String targetField = ConfigurationUtils.readStringProperty(config, "target_field", DEFAULT_TARGET_FIELD);
-           String timezoneString = ConfigurationUtils.readOptionalStringProperty(config, "timezone");
+           String matchField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "match_field");
+           String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", DEFAULT_TARGET_FIELD);
+           String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "timezone");
            DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString);
-           String localeString = ConfigurationUtils.readOptionalStringProperty(config, "locale");
+           String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "locale");
            Locale locale = Locale.ENGLISH;
            if (localeString != null) {
                try {
@ -125,7 +125,7 @@ public final class DateProcessor extends AbstractProcessor {
                    throw new IllegalArgumentException("Invalid language tag specified: " + localeString);
                }
            }
-           List<String> matchFormats = ConfigurationUtils.readList(config, "match_formats");
+           List<String> matchFormats = ConfigurationUtils.readList(TYPE, processorTag, config, "match_formats");
            return new DateProcessor(processorTag, timezone, locale, matchField, matchFormats, targetField);
        }
    }
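As a usage sketch, the map the date factory consumes could look like this (all values illustrative):

    Map<String, Object> config = new HashMap<>();
    config.put("match_field", "timestamp");
    config.put("match_formats", Arrays.asList("yyyy-MM-dd", "ISO8601"));
    config.put("timezone", "Europe/Amsterdam"); // optional, defaults to UTC
    config.put("locale", "en");                 // optional, defaults to English
    DateProcessor processor = new DateProcessor.Factory().doCreate("my-tag", config);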

@ -95,7 +95,7 @@ public class DeDotProcessor extends AbstractProcessor {
        @Override
        public DeDotProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String separator = ConfigurationUtils.readOptionalStringProperty(config, "separator");
+           String separator = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "separator");
            if (separator == null) {
                separator = DEFAULT_SEPARATOR;
            }

@ -66,7 +66,7 @@ public class FailProcessor extends AbstractProcessor {
        @Override
        public FailProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String message = ConfigurationUtils.readStringProperty(config, "message");
+           String message = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "message");
            return new FailProcessor(processorTag, templateService.compile(message));
        }
    }

@ -79,9 +79,9 @@ public class GsubProcessor extends AbstractProcessor {
    public static class Factory extends AbstractProcessorFactory<GsubProcessor> {
        @Override
        public GsubProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           String pattern = ConfigurationUtils.readStringProperty(config, "pattern");
-           String replacement = ConfigurationUtils.readStringProperty(config, "replacement");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           String pattern = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pattern");
+           String replacement = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "replacement");
            Pattern searchPattern = Pattern.compile(pattern);
            return new GsubProcessor(processorTag, field, searchPattern, replacement);
        }

@ -73,8 +73,8 @@ public class JoinProcessor extends AbstractProcessor {
    public static class Factory extends AbstractProcessorFactory<JoinProcessor> {
        @Override
        public JoinProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           String separator = ConfigurationUtils.readStringProperty(config, "separator");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           String separator = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "separator");
            return new JoinProcessor(processorTag, field, separator);
        }
    }

@ -45,6 +45,11 @@ public class LowercaseProcessor extends AbstractStringProcessor {
    }

    public static class Factory extends AbstractStringProcessor.Factory<LowercaseProcessor> {
+       public Factory() {
+           super(TYPE);
+       }
+
        @Override
        protected LowercaseProcessor newProcessor(String tag, String field) {
            return new LowercaseProcessor(tag, field);

@ -65,7 +65,7 @@ public class RemoveProcessor extends AbstractProcessor {
        @Override
        public RemoveProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
            return new RemoveProcessor(processorTag, templateService.compile(field));
        }
    }

@ -78,8 +78,8 @@ public class RenameProcessor extends AbstractProcessor {
    public static class Factory extends AbstractProcessorFactory<RenameProcessor> {
        @Override
        public RenameProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           String newField = ConfigurationUtils.readStringProperty(config, "to");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           String newField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "to");
            return new RenameProcessor(processorTag, field, newField);
        }
    }

@ -73,8 +73,8 @@ public class SetProcessor extends AbstractProcessor {
        @Override
        public SetProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           Object value = ConfigurationUtils.readObject(config, "value");
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value");
            return new SetProcessor(processorTag, templateService.compile(field), ValueSource.wrap(value, templateService));
        }
    }

@ -75,8 +75,8 @@ public class SplitProcessor extends AbstractProcessor {
    public static class Factory extends AbstractProcessorFactory<SplitProcessor> {
        @Override
        public SplitProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-           String field = ConfigurationUtils.readStringProperty(config, "field");
-           return new SplitProcessor(processorTag, field, ConfigurationUtils.readStringProperty(config, "separator"));
+           String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
+           return new SplitProcessor(processorTag, field, ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "separator"));
        }
    }
}

@ -42,6 +42,11 @@ public class TrimProcessor extends AbstractStringProcessor {
    }

    public static class Factory extends AbstractStringProcessor.Factory<TrimProcessor> {
+       public Factory() {
+           super(TYPE);
+       }
+
        @Override
        protected TrimProcessor newProcessor(String tag, String field) {
            return new TrimProcessor(tag, field);

@ -44,6 +44,11 @@ public class UppercaseProcessor extends AbstractStringProcessor {
    }

    public static class Factory extends AbstractStringProcessor.Factory<UppercaseProcessor> {
+       public Factory() {
+           super(TYPE);
+       }
+
        @Override
        protected UppercaseProcessor newProcessor(String tag, String field) {
            return new UppercaseProcessor(tag, field);

@ -23,7 +23,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.monitor.fs.FsService;
-import org.elasticsearch.monitor.jvm.JvmMonitorService;
+import org.elasticsearch.monitor.jvm.JvmGcMonitorService;
import org.elasticsearch.monitor.jvm.JvmService;
import org.elasticsearch.monitor.os.OsService;
import org.elasticsearch.monitor.process.ProcessService;
@ -36,7 +36,7 @@ import java.io.IOException;
 */
public class MonitorService extends AbstractLifecycleComponent<MonitorService> {

-   private final JvmMonitorService jvmMonitorService;
+   private final JvmGcMonitorService jvmGcMonitorService;

    private final OsService osService;
@ -48,7 +48,7 @@ public class MonitorService extends AbstractLifecycleComponent<MonitorService> {
    public MonitorService(Settings settings, NodeEnvironment nodeEnvironment, ThreadPool threadPool) throws IOException {
        super(settings);
-       this.jvmMonitorService = new JvmMonitorService(settings, threadPool);
+       this.jvmGcMonitorService = new JvmGcMonitorService(settings, threadPool);
        this.osService = new OsService(settings);
        this.processService = new ProcessService(settings);
        this.jvmService = new JvmService(settings);
@ -73,16 +73,16 @@ public class MonitorService extends AbstractLifecycleComponent<MonitorService> {
    @Override
    protected void doStart() {
-       jvmMonitorService.start();
+       jvmGcMonitorService.start();
    }

    @Override
    protected void doStop() {
-       jvmMonitorService.stop();
+       jvmGcMonitorService.stop();
    }

    @Override
    protected void doClose() {
-       jvmMonitorService.close();
+       jvmGcMonitorService.close();
    }
}

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.fs;

import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
@ -35,10 +36,13 @@ public class FsService extends AbstractComponent {

    private final SingleObjectCache<FsInfo> fsStatsCache;

+   public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+       Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER);
+
    public FsService(Settings settings, NodeEnvironment nodeEnvironment) throws IOException {
        super(settings);
        this.probe = new FsProbe(settings, nodeEnvironment);
-       TimeValue refreshInterval = settings.getAsTime("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1));
+       TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
        fsStatsCache = new FsInfoCache(refreshInterval, probe.stats());
        logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
    }
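Because the new Setting declares both a default and a minimum of one second, too-small values are rejected at parse time instead of being silently accepted; a sketch:

    Settings nodeSettings = Settings.settingsBuilder()
        .put("monitor.fs.refresh_interval", "5s") // anything below 1s now fails validation
        .build();
    TimeValue interval = FsService.REFRESH_INTERVAL_SETTING.get(nodeSettings); // 5s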

@ -20,6 +20,8 @@
package org.elasticsearch.monitor.jvm;

import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
@ -31,13 +33,12 @@ import java.util.Map;
import java.util.concurrent.ScheduledFuture;

import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.elasticsearch.monitor.jvm.JvmStats.jvmStats;

/**
 *
 */
-public class JvmMonitorService extends AbstractLifecycleComponent<JvmMonitorService> {
+public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {

    private final ThreadPool threadPool;
    private final boolean enabled;
@ -46,6 +47,13 @@ public class JvmMonitorService extends AbstractLifecycleComponent<JvmMonitorServ
    private volatile ScheduledFuture scheduledFuture;

+   public final static Setting<Boolean> ENABLED_SETTING = Setting.boolSetting("monitor.jvm.gc.enabled", true, false, Scope.CLUSTER);
+   public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+       Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Scope.CLUSTER);
+   private static String GC_COLLECTOR_PREFIX = "monitor.jvm.gc.collector.";
+   public final static Setting<Settings> GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, false, Scope.CLUSTER);
+
    static class GcThreshold {
        public final String name;
        public final long warnThreshold;
@ -70,25 +78,21 @@ public class JvmMonitorService extends AbstractLifecycleComponent<JvmMonitorServ
        }
    }

-   public JvmMonitorService(Settings settings, ThreadPool threadPool) {
+   public JvmGcMonitorService(Settings settings, ThreadPool threadPool) {
        super(settings);
        this.threadPool = threadPool;
-       this.enabled = this.settings.getAsBoolean("monitor.jvm.enabled", true);
-       this.interval = this.settings.getAsTime("monitor.jvm.interval", timeValueSeconds(1));
+       this.enabled = ENABLED_SETTING.get(settings);
+       this.interval = REFRESH_INTERVAL_SETTING.get(settings);

        Map<String, GcThreshold> gcThresholds = new HashMap<>();
-       Map<String, Settings> gcThresholdGroups = this.settings.getGroups("monitor.jvm.gc");
+       Map<String, Settings> gcThresholdGroups = GC_SETTING.get(settings).getAsGroups();
        for (Map.Entry<String, Settings> entry : gcThresholdGroups.entrySet()) {
            String name = entry.getKey();
-           TimeValue warn = entry.getValue().getAsTime("warn", null);
-           TimeValue info = entry.getValue().getAsTime("info", null);
-           TimeValue debug = entry.getValue().getAsTime("debug", null);
-           if (warn == null || info == null || debug == null) {
-               logger.warn("ignoring gc_threshold for [{}], missing warn/info/debug values", name);
-           } else {
-               gcThresholds.put(name, new GcThreshold(name, warn.millis(), info.millis(), debug.millis()));
-           }
+           TimeValue warn = getValidThreshold(entry.getValue(), entry.getKey(), "warn");
+           TimeValue info = getValidThreshold(entry.getValue(), entry.getKey(), "info");
+           TimeValue debug = getValidThreshold(entry.getValue(), entry.getKey(), "debug");
+           gcThresholds.put(name, new GcThreshold(name, warn.millis(), info.millis(), debug.millis()));
        }
        gcThresholds.putIfAbsent(GcNames.YOUNG, new GcThreshold(GcNames.YOUNG, 1000, 700, 400));
        gcThresholds.putIfAbsent(GcNames.OLD, new GcThreshold(GcNames.OLD, 10000, 5000, 2000));
@ -98,6 +102,21 @@ public class JvmMonitorService extends AbstractLifecycleComponent<JvmMonitorServ
logger.debug("enabled [{}], interval [{}], gc_threshold [{}]", enabled, interval, this.gcThresholds); logger.debug("enabled [{}], interval [{}], gc_threshold [{}]", enabled, interval, this.gcThresholds);
} }
private static TimeValue getValidThreshold(Settings settings, String key, String level) {
TimeValue threshold = settings.getAsTime(level, null);
if (threshold == null) {
throw new IllegalArgumentException("missing gc_threshold for [" + getThresholdName(key, level) + "]");
}
if (threshold.nanos() <= 0) {
throw new IllegalArgumentException("invalid gc_threshold [" + threshold + "] for [" + getThresholdName(key, level) + "]");
}
return threshold;
}
private static String getThresholdName(String key, String level) {
return GC_COLLECTOR_PREFIX + key + "." + level;
}
@Override @Override
protected void doStart() { protected void doStart() {
if (!enabled) { if (!enabled) {
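Under the group setting, per-collector thresholds hang off the monitor.jvm.gc.collector. prefix, and all three levels are now mandatory and must be positive; a sketch (values illustrative):

    Settings settings = Settings.settingsBuilder()
        .put("monitor.jvm.gc.enabled", true)
        .put("monitor.jvm.gc.refresh_interval", "2s")
        .put("monitor.jvm.gc.collector.young.warn", "1000ms")
        .put("monitor.jvm.gc.collector.young.info", "700ms")
        .put("monitor.jvm.gc.collector.young.debug", "400ms")
        .build();

A collector entry missing any of warn/info/debug now throws IllegalArgumentException at construction instead of being logged and ignored.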

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.jvm;

import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -34,12 +35,15 @@ public class JvmService extends AbstractComponent {
    private JvmStats jvmStats;

+   public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+       Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER);
+
    public JvmService(Settings settings) {
        super(settings);
        this.jvmInfo = JvmInfo.jvmInfo();
        this.jvmStats = JvmStats.jvmStats();
-       this.refreshInterval = this.settings.getAsTime("refresh_interval", TimeValue.timeValueSeconds(1));
+       this.refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
        logger.debug("Using refresh_interval [{}]", refreshInterval);
    }

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.os;

import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
@ -36,11 +37,14 @@ public class OsService extends AbstractComponent {
    private SingleObjectCache<OsStats> osStatsCache;

+   public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+       Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER);
+
    public OsService(Settings settings) {
        super(settings);
        this.probe = OsProbe.getInstance();
-       TimeValue refreshInterval = settings.getAsTime("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1));
+       TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
        this.info = probe.osInfo();
        this.info.refreshInterval = refreshInterval.millis();

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.process;

import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.SingleObjectCache;
@ -33,11 +34,14 @@ public final class ProcessService extends AbstractComponent {
    private final ProcessInfo info;
    private final SingleObjectCache<ProcessStats> processStatsCache;

+   public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING =
+       Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER);
+
    public ProcessService(Settings settings) {
        super(settings);
        this.probe = ProcessProbe.getInstance();
-       final TimeValue refreshInterval = settings.getAsTime("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1));
+       final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
        processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats());
        this.info = probe.processInfo();
        this.info.refreshInterval = refreshInterval.millis();

@@ -19,6 +19,7 @@
 package org.elasticsearch.node;
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Build;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -100,6 +101,7 @@ import org.elasticsearch.watcher.ResourceWatcherModule;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import java.io.BufferedWriter;
+import java.io.Closeable;
 import java.io.IOException;
 import java.net.Inet6Address;
 import java.net.InetAddress;
@@ -108,9 +110,11 @@ import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
@@ -120,7 +124,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
  * A node represents a node within a cluster (<tt>cluster.name</tt>). The {@link #client()} can be used
  * in order to use a {@link Client} to perform actions/operations against the cluster.
  */
-public class Node implements Releasable {
+public class Node implements Closeable {
     public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER);
     public static final Setting<Boolean> NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, false, Setting.Scope.CLUSTER);
@@ -351,7 +355,7 @@ public class Node implements Releasable {
     // If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be executed, in case another (for example api) call
     // to close() has already set some lifecycles to stopped. In this case the process will be terminated even if the first call to close() has not finished yet.
     @Override
-    public synchronized void close() {
+    public synchronized void close() throws IOException {
         if (lifecycle.started()) {
             stop();
         }
@@ -361,88 +365,80 @@ public class Node implements Releasable {
         ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
         logger.info("closing ...");
+        List<Closeable> toClose = new ArrayList<>();
         StopWatch stopWatch = new StopWatch("node_close");
-        stopWatch.start("tribe");
-        injector.getInstance(TribeService.class).close();
-        stopWatch.stop().start("node_service");
-        try {
-            injector.getInstance(NodeService.class).close();
-        } catch (IOException e) {
-            logger.warn("NodeService close failed", e);
-        }
-        stopWatch.stop().start("http");
+        toClose.add(() -> stopWatch.start("tribe"));
+        toClose.add(injector.getInstance(TribeService.class));
+        toClose.add(() -> stopWatch.stop().start("node_service"));
+        toClose.add(injector.getInstance(NodeService.class));
+        toClose.add(() -> stopWatch.stop().start("http"));
         if (settings.getAsBoolean("http.enabled", true)) {
-            injector.getInstance(HttpServer.class).close();
+            toClose.add(injector.getInstance(HttpServer.class));
         }
-        stopWatch.stop().start("snapshot_service");
-        injector.getInstance(SnapshotsService.class).close();
-        injector.getInstance(SnapshotShardsService.class).close();
-        stopWatch.stop().start("client");
+        toClose.add(() -> stopWatch.stop().start("snapshot_service"));
+        toClose.add(injector.getInstance(SnapshotsService.class));
+        toClose.add(injector.getInstance(SnapshotShardsService.class));
+        toClose.add(() -> stopWatch.stop().start("client"));
         Releasables.close(injector.getInstance(Client.class));
-        stopWatch.stop().start("indices_cluster");
-        injector.getInstance(IndicesClusterStateService.class).close();
-        stopWatch.stop().start("indices");
-        injector.getInstance(IndicesTTLService.class).close();
-        injector.getInstance(IndicesService.class).close();
+        toClose.add(() -> stopWatch.stop().start("indices_cluster"));
+        toClose.add(injector.getInstance(IndicesClusterStateService.class));
+        toClose.add(() -> stopWatch.stop().start("indices"));
+        toClose.add(injector.getInstance(IndicesTTLService.class));
+        toClose.add(injector.getInstance(IndicesService.class));
         // close filter/fielddata caches after indices
-        injector.getInstance(IndicesQueryCache.class).close();
-        injector.getInstance(IndicesFieldDataCache.class).close();
-        injector.getInstance(IndicesStore.class).close();
-        stopWatch.stop().start("routing");
-        injector.getInstance(RoutingService.class).close();
-        stopWatch.stop().start("cluster");
-        injector.getInstance(ClusterService.class).close();
-        stopWatch.stop().start("discovery");
-        injector.getInstance(DiscoveryService.class).close();
-        stopWatch.stop().start("monitor");
-        injector.getInstance(MonitorService.class).close();
-        stopWatch.stop().start("gateway");
-        injector.getInstance(GatewayService.class).close();
-        stopWatch.stop().start("search");
-        injector.getInstance(SearchService.class).close();
-        stopWatch.stop().start("rest");
-        injector.getInstance(RestController.class).close();
-        stopWatch.stop().start("transport");
-        injector.getInstance(TransportService.class).close();
-        stopWatch.stop().start("percolator_service");
-        injector.getInstance(PercolatorService.class).close();
+        toClose.add(injector.getInstance(IndicesQueryCache.class));
+        toClose.add(injector.getInstance(IndicesFieldDataCache.class));
+        toClose.add(injector.getInstance(IndicesStore.class));
+        toClose.add(() -> stopWatch.stop().start("routing"));
+        toClose.add(injector.getInstance(RoutingService.class));
+        toClose.add(() -> stopWatch.stop().start("cluster"));
+        toClose.add(injector.getInstance(ClusterService.class));
+        toClose.add(() -> stopWatch.stop().start("discovery"));
+        toClose.add(injector.getInstance(DiscoveryService.class));
+        toClose.add(() -> stopWatch.stop().start("monitor"));
+        toClose.add(injector.getInstance(MonitorService.class));
+        toClose.add(() -> stopWatch.stop().start("gateway"));
+        toClose.add(injector.getInstance(GatewayService.class));
+        toClose.add(() -> stopWatch.stop().start("search"));
+        toClose.add(injector.getInstance(SearchService.class));
+        toClose.add(() -> stopWatch.stop().start("rest"));
+        toClose.add(injector.getInstance(RestController.class));
+        toClose.add(() -> stopWatch.stop().start("transport"));
+        toClose.add(injector.getInstance(TransportService.class));
+        toClose.add(() -> stopWatch.stop().start("percolator_service"));
+        toClose.add(injector.getInstance(PercolatorService.class));
         for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
-            stopWatch.stop().start("plugin(" + plugin.getName() + ")");
-            injector.getInstance(plugin).close();
+            toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getName() + ")"));
+            toClose.add(injector.getInstance(plugin));
         }
-        stopWatch.stop().start("script");
-        try {
-            injector.getInstance(ScriptService.class).close();
-        } catch(IOException e) {
-            logger.warn("ScriptService close failed", e);
-        }
-        stopWatch.stop().start("thread_pool");
+        toClose.add(() -> stopWatch.stop().start("script"));
+        toClose.add(injector.getInstance(ScriptService.class));
+        toClose.add(() -> stopWatch.stop().start("thread_pool"));
         // TODO this should really use ThreadPool.terminate()
-        injector.getInstance(ThreadPool.class).shutdown();
-        try {
-            injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-            // ignore
-        }
-        stopWatch.stop().start("thread_pool_force_shutdown");
-        try {
-            injector.getInstance(ThreadPool.class).shutdownNow();
-        } catch (Exception e) {
-            // ignore
-        }
-        stopWatch.stop();
+        toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
+        toClose.add(() -> {
+            try {
+                injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        });
+        toClose.add(() -> stopWatch.stop().start("thread_pool_force_shutdown"));
+        toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
+        toClose.add(() -> stopWatch.stop());
+        toClose.add(injector.getInstance(NodeEnvironment.class));
+        toClose.add(injector.getInstance(PageCacheRecycler.class));
         if (logger.isTraceEnabled()) {
             logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
         }
-        injector.getInstance(NodeEnvironment.class).close();
-        injector.getInstance(PageCacheRecycler.class).close();
+        IOUtils.close(toClose);
         logger.info("closed");
     }
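Reduced to its essentials, the close() refactoring swaps eager per-service close() calls for a collected list that IOUtils closes in one pass. A self-contained sketch under that reading (the service classes here are placeholders, not Elasticsearch types; only the toClose/IOUtils.close pattern is taken from the hunk above):

    import org.apache.lucene.util.IOUtils;
    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class ShutdownSketch {
        static class ServiceA implements Closeable {
            @Override public void close() { /* release resources */ }
        }
        static class ServiceB implements Closeable {
            @Override public void close() { /* release resources */ }
        }

        public void close() throws IOException {
            List<Closeable> toClose = new ArrayList<>();
            // Closeable has a single abstract method, so lambdas can queue
            // side effects (timing markers, log lines) between services:
            toClose.add(() -> System.out.println("closing A"));
            toClose.add(new ServiceA());
            toClose.add(() -> System.out.println("closing B"));
            toClose.add(new ServiceB());
            // Closes entries in list order; the first exception is rethrown
            // after all entries have been attempted, with later failures
            // attached to it as suppressed exceptions.
            IOUtils.close(toClose);
        }
    }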

@@ -44,6 +44,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.Text;
@@ -87,7 +88,7 @@ import java.util.stream.StreamSupport;
 import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
 import static org.apache.lucene.search.BooleanClause.Occur.MUST;
-public class PercolatorService extends AbstractComponent {
+public class PercolatorService extends AbstractComponent implements Releasable {
     public final static float NO_SCORE = Float.NEGATIVE_INFINITY;
     public final static String TYPE_NAME = ".percolator";
@@ -308,6 +309,7 @@ public class PercolatorService extends AbstractComponent {
         }
     }
+    @Override
     public void close() {
         cache.close();
     }

@@ -20,9 +20,13 @@
 package org.elasticsearch.rest.action.ingest;
 import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.WritePipelineResponse;
+import org.elasticsearch.action.ingest.WritePipelineResponseRestListener;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
@@ -30,6 +34,8 @@ import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
 import org.elasticsearch.rest.action.support.RestActions;
+import java.io.IOException;
 public class RestPutPipelineAction extends BaseRestHandler {
     @Inject
@@ -43,6 +49,7 @@ public class RestPutPipelineAction extends BaseRestHandler {
         PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), RestActions.getRestContent(restRequest));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
-        client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel));
+        client.admin().cluster().putPipeline(request, new WritePipelineResponseRestListener(channel));
     }
 }

@@ -28,6 +28,7 @@ import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestStatusToXContentListener;
 import org.elasticsearch.rest.action.support.RestToXContentListener;
 public class RestSimulatePipelineAction extends BaseRestHandler {
@@ -46,6 +47,6 @@ public class RestSimulatePipelineAction extends BaseRestHandler {
         SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest));
         request.setId(restRequest.param("id"));
         request.setVerbose(restRequest.paramAsBoolean("verbose", false));
-        client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
+        client.admin().cluster().simulatePipeline(request, new RestStatusToXContentListener<>(channel));
     }
 }

@@ -57,7 +57,7 @@ public class RestUtils {
         if (fromIndex >= s.length()) {
             return;
         }
         int queryStringLength = s.contains("#") ? s.indexOf("#") : s.length();
         String name = null;

@@ -1135,7 +1135,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         // Use the same value for both checks since lastAccessTime can
         // be modified by another thread between checks!
         final long lastAccessTime = context.lastAccessTime();
-        if (lastAccessTime == -1l) { // its being processed or timeout is disabled
+        if (lastAccessTime == -1L) { // it's being processed or timeout is disabled
             continue;
         }
         if ((time - lastAccessTime > context.keepAlive())) {

@@ -114,7 +114,7 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue {
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalAvg(name, 0.0, 0l, formatter, pipelineAggregators(), metaData());
+        return new InternalAvg(name, 0.0, 0L, formatter, pipelineAggregators(), metaData());
     }
     public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource.Numeric, Factory> {

@@ -117,7 +117,7 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalGeoCentroid(name, null, 0l, pipelineAggregators(), metaData());
+        return new InternalGeoCentroid(name, null, 0L, pipelineAggregators(), metaData());
     }
     @Override

@@ -104,7 +104,7 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalValueCount(name, 0l, formatter, pipelineAggregators(), metaData());
+        return new InternalValueCount(name, 0L, formatter, pipelineAggregators(), metaData());
     }
     @Override
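The `0l`/`-1l` literals in these hunks are rewritten with an uppercase suffix purely for readability; a two-line illustration (the variable names are not from this commit):

    long misread = 100l; // lowercase 'l' is easy to confuse with the digit '1'
    long clear = 100L;   // uppercase 'L' is unambiguous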

@@ -0,0 +1,493 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.suggest.phrase;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.suggest.SuggestUtils;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.CandidateGenerator;
import java.io.IOException;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.function.Consumer;
public final class DirectCandidateGeneratorBuilder
implements Writeable<DirectCandidateGeneratorBuilder>, CandidateGenerator {
private static final String TYPE = "direct_generator";
static final DirectCandidateGeneratorBuilder PROTOTYPE = new DirectCandidateGeneratorBuilder("_na_");
static final ParseField DIRECT_GENERATOR_FIELD = new ParseField(TYPE);
static final ParseField FIELDNAME_FIELD = new ParseField("field");
static final ParseField PREFILTER_FIELD = new ParseField("pre_filter");
static final ParseField POSTFILTER_FIELD = new ParseField("post_filter");
static final ParseField SUGGESTMODE_FIELD = new ParseField("suggest_mode");
static final ParseField MIN_DOC_FREQ_FIELD = new ParseField("min_doc_freq");
static final ParseField ACCURACY_FIELD = new ParseField("accuracy");
static final ParseField SIZE_FIELD = new ParseField("size");
static final ParseField SORT_FIELD = new ParseField("sort");
static final ParseField STRING_DISTANCE_FIELD = new ParseField("string_distance");
static final ParseField MAX_EDITS_FIELD = new ParseField("max_edits");
static final ParseField MAX_INSPECTIONS_FIELD = new ParseField("max_inspections");
static final ParseField MAX_TERM_FREQ_FIELD = new ParseField("max_term_freq");
static final ParseField PREFIX_LENGTH_FIELD = new ParseField("prefix_length");
static final ParseField MIN_WORD_LENGTH_FIELD = new ParseField("min_word_length");
private final String field;
private String preFilter;
private String postFilter;
private String suggestMode;
private Float accuracy;
private Integer size;
private String sort;
private String stringDistance;
private Integer maxEdits;
private Integer maxInspections;
private Float maxTermFreq;
private Integer prefixLength;
private Integer minWordLength;
private Float minDocFreq;
/**
* @param field the field to fetch candidate suggestions from
*/
public DirectCandidateGeneratorBuilder(String field) {
this.field = field;
}
/**
* Quasi copy-constructor that takes all values from the generator
* passed in, but uses a different field name. Needed by the parser because we
* need to buffer the field name but read all other properties to a
* temporary object.
*/
private static DirectCandidateGeneratorBuilder replaceField(String field, DirectCandidateGeneratorBuilder other) {
DirectCandidateGeneratorBuilder generator = new DirectCandidateGeneratorBuilder(field);
generator.preFilter = other.preFilter;
generator.postFilter = other.postFilter;
generator.suggestMode = other.suggestMode;
generator.accuracy = other.accuracy;
generator.size = other.size;
generator.sort = other.sort;
generator.stringDistance = other.stringDistance;
generator.maxEdits = other.maxEdits;
generator.maxInspections = other.maxInspections;
generator.maxTermFreq = other.maxTermFreq;
generator.prefixLength = other.prefixLength;
generator.minWordLength = other.minWordLength;
generator.minDocFreq = other.minDocFreq;
return generator;
}
/**
* The global suggest mode controls which suggested terms are included, or
* for which suggest text tokens terms should be suggested.
* Three possible values can be specified:
* <ol>
* <li><code>missing</code> - Only suggest terms in the suggest text
* that aren't in the index. This is the default.
* <li><code>popular</code> - Only suggest terms that occur in more docs
* than the original suggest text term.
* <li><code>always</code> - Suggest any matching suggest terms based on
* tokens in the suggest text.
* </ol>
*/
public DirectCandidateGeneratorBuilder suggestMode(String suggestMode) {
this.suggestMode = suggestMode;
return this;
}
/**
* Sets how similar, at a minimum, the suggested terms need to be to
* the original suggest text tokens. A value between 0 and 1 can be
* specified. This value will be compared to the string distance result
* of each candidate spelling correction.
* <p>
* Default is <tt>0.5</tt>
*/
public DirectCandidateGeneratorBuilder accuracy(float accuracy) {
this.accuracy = accuracy;
return this;
}
/**
* Sets the maximum suggestions to be returned per suggest text term.
*/
public DirectCandidateGeneratorBuilder size(int size) {
if (size <= 0) {
throw new IllegalArgumentException("Size must be positive");
}
this.size = size;
return this;
}
/**
* Sets how to sort the suggest terms per suggest text token. Two
* possible values:
* <ol>
* <li><code>score</code> - Sort should first be based on score, then
* document frequency and then the term itself.
* <li><code>frequency</code> - Sort should first be based on document
* frequency, then score and then the term itself.
* </ol>
* <p>
* What the score is depends on the suggester being used.
*/
public DirectCandidateGeneratorBuilder sort(String sort) {
this.sort = sort;
return this;
}
/**
* Sets what string distance implementation to use for comparing how
* similar suggested terms are. Four possible values can be specified:
* <ol>
* <li><code>internal</code> - This is the default and is based on
* <code>damerau_levenshtein</code>, but highly optimized for comparing
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm
* based on Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
* the Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
* n-grams.
* </ol>
*/
public DirectCandidateGeneratorBuilder stringDistance(String stringDistance) {
this.stringDistance = stringDistance;
return this;
}
/**
* Sets the maximum edit distance candidate suggestions can have in
* order to be considered as a suggestion. Can only be a value between 1
* and 2. Any other value results in a bad request error being thrown.
* Defaults to <tt>2</tt>.
*/
public DirectCandidateGeneratorBuilder maxEdits(Integer maxEdits) {
if (maxEdits < 1 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("Illegal max_edits value " + maxEdits);
}
this.maxEdits = maxEdits;
return this;
}
/**
* A factor that is used to multiply with the size in order to inspect
* more candidate suggestions. Can improve accuracy at the cost of
* performance. Defaults to <tt>5</tt>.
*/
public DirectCandidateGeneratorBuilder maxInspections(Integer maxInspections) {
this.maxInspections = maxInspections;
return this;
}
/**
* Sets a maximum threshold in number of documents a suggest text token
* can exist in order to be corrected. Can be a relative percentage
* number (e.g. 0.4) or an absolute number to represent document
* frequencies. If a value higher than 1 is specified, then fractional
* values cannot be specified. Defaults to <tt>0.01</tt>.
* <p>
* This can be used to exclude high frequency terms from being
* suggested. High frequency terms are usually spelled correctly; on top
* of that, this also improves the suggest performance.
*/
public DirectCandidateGeneratorBuilder maxTermFreq(float maxTermFreq) {
this.maxTermFreq = maxTermFreq;
return this;
}
/**
* Sets the minimal number of prefix characters that must match in order
* to be a candidate suggestion. Defaults to 1. Increasing this number
* improves suggest performance. Usually misspellings don't occur in the
* beginning of terms.
*/
public DirectCandidateGeneratorBuilder prefixLength(int prefixLength) {
this.prefixLength = prefixLength;
return this;
}
/**
* The minimum length a suggest text term must have in order to be
* corrected. Defaults to <tt>4</tt>.
*/
public DirectCandidateGeneratorBuilder minWordLength(int minWordLength) {
this.minWordLength = minWordLength;
return this;
}
/**
* Sets a minimal threshold in number of documents a suggested term
* should appear in. This can be specified as an absolute number or as a
* relative percentage of number of documents. This can improve quality
* by only suggesting high frequency terms. Defaults to 0f and is not
* enabled. If a value higher than 1 is specified then the number cannot
* be fractional.
*/
public DirectCandidateGeneratorBuilder minDocFreq(float minDocFreq) {
this.minDocFreq = minDocFreq;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
* This filter is applied to the original token before candidates are generated.
*/
public DirectCandidateGeneratorBuilder preFilter(String preFilter) {
this.preFilter = preFilter;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the generated tokens
* before they are passed to the actual phrase scorer.
*/
public DirectCandidateGeneratorBuilder postFilter(String postFilter) {
this.postFilter = postFilter;
return this;
}
/**
* Gets the type identifier of this {@link CandidateGenerator}.
*/
@Override
public String getType() {
return TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
outputFieldIfNotNull(field, FIELDNAME_FIELD, builder);
outputFieldIfNotNull(accuracy, ACCURACY_FIELD, builder);
outputFieldIfNotNull(maxEdits, MAX_EDITS_FIELD, builder);
outputFieldIfNotNull(maxInspections, MAX_INSPECTIONS_FIELD, builder);
outputFieldIfNotNull(maxTermFreq, MAX_TERM_FREQ_FIELD, builder);
outputFieldIfNotNull(minWordLength, MIN_WORD_LENGTH_FIELD, builder);
outputFieldIfNotNull(minDocFreq, MIN_DOC_FREQ_FIELD, builder);
outputFieldIfNotNull(preFilter, PREFILTER_FIELD, builder);
outputFieldIfNotNull(prefixLength, PREFIX_LENGTH_FIELD, builder);
outputFieldIfNotNull(postFilter, POSTFILTER_FIELD, builder);
outputFieldIfNotNull(suggestMode, SUGGESTMODE_FIELD, builder);
outputFieldIfNotNull(size, SIZE_FIELD, builder);
outputFieldIfNotNull(sort, SORT_FIELD, builder);
outputFieldIfNotNull(stringDistance, STRING_DISTANCE_FIELD, builder);
builder.endObject();
return builder;
}
private static <T> void outputFieldIfNotNull(T value, ParseField field, XContentBuilder builder) throws IOException {
if (value != null) {
builder.field(field.getPreferredName(), value);
}
}
private static ObjectParser<Tuple<Set<String>, DirectCandidateGeneratorBuilder>, QueryParseContext> PARSER = new ObjectParser<>(TYPE);
static {
PARSER.declareString((tp, s) -> tp.v1().add(s), FIELDNAME_FIELD);
PARSER.declareString((tp, s) -> tp.v2().preFilter(s), PREFILTER_FIELD);
PARSER.declareString((tp, s) -> tp.v2().postFilter(s), POSTFILTER_FIELD);
PARSER.declareString((tp, s) -> tp.v2().suggestMode(s), SUGGESTMODE_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().minDocFreq(f), MIN_DOC_FREQ_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().accuracy(f), ACCURACY_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().size(i), SIZE_FIELD);
PARSER.declareString((tp, s) -> tp.v2().sort(s), SORT_FIELD);
PARSER.declareString((tp, s) -> tp.v2().stringDistance(s), STRING_DISTANCE_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().maxInspections(i), MAX_INSPECTIONS_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().maxTermFreq(f), MAX_TERM_FREQ_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().maxEdits(i), MAX_EDITS_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().minWordLength(i), MIN_WORD_LENGTH_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().prefixLength(i), PREFIX_LENGTH_FIELD);
}
@Override
public DirectCandidateGeneratorBuilder fromXContent(QueryParseContext parseContext) throws IOException {
DirectCandidateGeneratorBuilder tempGenerator = new DirectCandidateGeneratorBuilder("_na_");
Set<String> tmpFieldName = new HashSet<>(1); // bucket for the field name, needed as constructor arg later
PARSER.parse(parseContext.parser(),
new Tuple<Set<String>, DirectCandidateGeneratorBuilder>(tmpFieldName, tempGenerator));
if (tmpFieldName.size() != 1) {
throw new IllegalArgumentException("[" + TYPE + "] expects exactly one field parameter, but found " + tmpFieldName);
}
return replaceField(tmpFieldName.iterator().next(), tempGenerator);
}
public PhraseSuggestionContext.DirectCandidateGenerator build(QueryShardContext context) throws IOException {
MapperService mapperService = context.getMapperService();
PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
generator.setField(this.field);
transferIfNotNull(this.size, generator::size);
if (this.preFilter != null) {
generator.preFilter(mapperService.analysisService().analyzer(this.preFilter));
if (generator.preFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.preFilter + "] doesn't exists");
}
}
if (this.postFilter != null) {
generator.postFilter(mapperService.analysisService().analyzer(this.postFilter));
if (generator.postFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.postFilter + "] doesn't exists");
}
}
transferIfNotNull(this.accuracy, generator::accuracy);
if (this.suggestMode != null) {
generator.suggestMode(SuggestUtils.resolveSuggestMode(this.suggestMode));
}
if (this.sort != null) {
generator.sort(SuggestUtils.resolveSort(this.sort));
}
if (this.stringDistance != null) {
generator.stringDistance(SuggestUtils.resolveDistance(this.stringDistance));
}
transferIfNotNull(this.maxEdits, generator::maxEdits);
if (generator.maxEdits() < 1 || generator.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("Illegal max_edits value " + generator.maxEdits());
}
transferIfNotNull(this.maxInspections, generator::maxInspections);
transferIfNotNull(this.maxTermFreq, generator::maxTermFreq);
transferIfNotNull(this.prefixLength, generator::prefixLength);
transferIfNotNull(this.minWordLength, generator::minQueryLength);
transferIfNotNull(this.minDocFreq, generator::minDocFreq);
return generator;
}
private static <T> void transferIfNotNull(T value, Consumer<T> consumer) {
if (value != null) {
consumer.accept(value);
}
}
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
@Override
public DirectCandidateGeneratorBuilder readFrom(StreamInput in) throws IOException {
DirectCandidateGeneratorBuilder cg = new DirectCandidateGeneratorBuilder(in.readString());
cg.suggestMode = in.readOptionalString();
if (in.readBoolean()) {
cg.accuracy = in.readFloat();
}
cg.size = in.readOptionalVInt();
cg.sort = in.readOptionalString();
cg.stringDistance = in.readOptionalString();
cg.maxEdits = in.readOptionalVInt();
cg.maxInspections = in.readOptionalVInt();
if (in.readBoolean()) {
cg.maxTermFreq = in.readFloat();
}
cg.prefixLength = in.readOptionalVInt();
cg.minWordLength = in.readOptionalVInt();
if (in.readBoolean()) {
cg.minDocFreq = in.readFloat();
}
cg.preFilter = in.readOptionalString();
cg.postFilter = in.readOptionalString();
return cg;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(field);
out.writeOptionalString(suggestMode);
out.writeBoolean(accuracy != null);
if (accuracy != null) {
out.writeFloat(accuracy);
}
out.writeOptionalVInt(size);
out.writeOptionalString(sort);
out.writeOptionalString(stringDistance);
out.writeOptionalVInt(maxEdits);
out.writeOptionalVInt(maxInspections);
out.writeBoolean(maxTermFreq != null);
if (maxTermFreq != null) {
out.writeFloat(maxTermFreq);
}
out.writeOptionalVInt(prefixLength);
out.writeOptionalVInt(minWordLength);
out.writeBoolean(minDocFreq != null);
if (minDocFreq != null) {
out.writeFloat(minDocFreq);
}
out.writeOptionalString(preFilter);
out.writeOptionalString(postFilter);
}
@Override
public final int hashCode() {
return Objects.hash(field, preFilter, postFilter, suggestMode, accuracy,
size, sort, stringDistance, maxEdits, maxInspections,
maxTermFreq, prefixLength, minWordLength, minDocFreq);
}
@Override
public final boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
DirectCandidateGeneratorBuilder other = (DirectCandidateGeneratorBuilder) obj;
return Objects.equals(field, other.field) &&
Objects.equals(preFilter, other.preFilter) &&
Objects.equals(postFilter, other.postFilter) &&
Objects.equals(suggestMode, other.suggestMode) &&
Objects.equals(accuracy, other.accuracy) &&
Objects.equals(size, other.size) &&
Objects.equals(sort, other.sort) &&
Objects.equals(stringDistance, other.stringDistance) &&
Objects.equals(maxEdits, other.maxEdits) &&
Objects.equals(maxInspections, other.maxInspections) &&
Objects.equals(maxTermFreq, other.maxTermFreq) &&
Objects.equals(prefixLength, other.prefixLength) &&
Objects.equals(minWordLength, other.minWordLength) &&
Objects.equals(minDocFreq, other.minDocFreq);
}
}
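A short usage sketch of the new builder's fluent API (the field name and parameter values are illustrative; the methods are the ones defined above):

    DirectCandidateGeneratorBuilder generator = new DirectCandidateGeneratorBuilder("body")
            .suggestMode("popular")  // only suggest terms more frequent than the input term
            .maxEdits(2)             // validated against LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE
            .prefixLength(1)
            .size(10);
    // toString() pretty-prints the generator as JSON via toXContent(...)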

@@ -98,18 +98,10 @@ public final class PhraseSuggestParser implements SuggestContextParser {
                 }
             }
         } else if (token == Token.START_ARRAY) {
-            if ("direct_generator".equals(fieldName) || "directGenerator".equals(fieldName)) {
+            if (parseFieldMatcher.match(fieldName, DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD)) {
                 // for now we only have a single type of generators
                 while ((token = parser.nextToken()) == Token.START_OBJECT) {
-                    PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
-                    while ((token = parser.nextToken()) != Token.END_OBJECT) {
-                        if (token == XContentParser.Token.FIELD_NAME) {
-                            fieldName = parser.currentName();
-                        }
-                        if (token.isValue()) {
-                            parseCandidateGenerator(parser, mapperService, fieldName, generator, parseFieldMatcher);
-                        }
-                    }
+                    PhraseSuggestionContext.DirectCandidateGenerator generator = parseCandidateGenerator(parser, mapperService, parseFieldMatcher);
                     verifyGenerator(generator);
                     suggestion.addGenerator(generator);
                 }
@@ -323,34 +315,44 @@ public final class PhraseSuggestParser implements SuggestContextParser {
         }
     }
-    private void parseCandidateGenerator(XContentParser parser, MapperService mapperService, String fieldName,
-            PhraseSuggestionContext.DirectCandidateGenerator generator, ParseFieldMatcher parseFieldMatcher) throws IOException {
-        if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator, parseFieldMatcher)) {
-            if ("field".equals(fieldName)) {
-                generator.setField(parser.text());
-                if (mapperService.fullName(generator.field()) == null) {
-                    throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]");
-                }
-            } else if ("size".equals(fieldName)) {
-                generator.size(parser.intValue());
-            } else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
-                String analyzerName = parser.text();
-                Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
-                if (analyzer == null) {
-                    throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
-                }
-                generator.preFilter(analyzer);
-            } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
-                String analyzerName = parser.text();
-                Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
-                if (analyzer == null) {
-                    throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
-                }
-                generator.postFilter(analyzer);
-            } else {
-                throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]");
-            }
-        }
-    }
+    static PhraseSuggestionContext.DirectCandidateGenerator parseCandidateGenerator(XContentParser parser, MapperService mapperService,
+            ParseFieldMatcher parseFieldMatcher) throws IOException {
+        XContentParser.Token token;
+        String fieldName = null;
+        PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
+        while ((token = parser.nextToken()) != Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                fieldName = parser.currentName();
+            }
+            if (token.isValue()) {
+                if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator, parseFieldMatcher)) {
+                    if ("field".equals(fieldName)) {
+                        generator.setField(parser.text());
+                        if (mapperService.fullName(generator.field()) == null) {
+                            throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]");
+                        }
+                    } else if ("size".equals(fieldName)) {
+                        generator.size(parser.intValue());
+                    } else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
+                        String analyzerName = parser.text();
+                        Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
+                        if (analyzer == null) {
+                            throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
+                        }
+                        generator.preFilter(analyzer);
+                    } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
+                        String analyzerName = parser.text();
+                        Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
+                        if (analyzer == null) {
+                            throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
+                        }
+                        generator.postFilter(analyzer);
+                    } else {
+                        throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]");
+                    }
+                }
+            }
+        }
+        return generator;
+    }
 }

@@ -278,13 +278,13 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
     }
     /**
-     * Creates a new {@link DirectCandidateGenerator}
+     * Creates a new {@link DirectCandidateGeneratorBuilder}
      *
      * @param field
      *            the field this candidate generator operates on.
      */
-    public static DirectCandidateGenerator candidateGenerator(String field) {
-        return new DirectCandidateGenerator(field);
+    public static DirectCandidateGeneratorBuilder candidateGenerator(String field) {
+        return new DirectCandidateGeneratorBuilder(field);
     }
     /**
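Combined with the new builder class above, a hedged sketch of how this factory method is meant to be used; the `addCandidateGenerator(...)` call and the suggestion name are assumptions based on the surrounding class, not lines from this commit:

    PhraseSuggestionBuilder suggestion = new PhraseSuggestionBuilder("did_you_mean");
    suggestion.addCandidateGenerator(
            PhraseSuggestionBuilder.candidateGenerator("body").suggestMode("always"));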
/** /**
@@ -644,267 +644,11 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
     }
     /**
-     * {@link CandidateGenerator} base class.
+     * {@link CandidateGenerator} interface.
      */
-    public static abstract class CandidateGenerator implements ToXContent {
-        private final String type;
-        public CandidateGenerator(String type) {
-            this.type = type;
-        }
-        public String getType() {
-            return type;
-        }
+    public interface CandidateGenerator extends ToXContent {
+        String getType();
+        CandidateGenerator fromXContent(QueryParseContext parseContext) throws IOException;
     }
/**
*
*
*/
public static final class DirectCandidateGenerator extends CandidateGenerator {
private final String field;
private String preFilter;
private String postFilter;
private String suggestMode;
private Float accuracy;
private Integer size;
private String sort;
private String stringDistance;
private Integer maxEdits;
private Integer maxInspections;
private Float maxTermFreq;
private Integer prefixLength;
private Integer minWordLength;
private Float minDocFreq;
/**
* @param field Sets from what field to fetch the candidate suggestions from.
*/
public DirectCandidateGenerator(String field) {
super("direct_generator");
this.field = field;
}
/**
* The global suggest mode controls which suggested terms are included, or
* for which suggest text tokens terms should be suggested.
* Three possible values can be specified:
* <ol>
* <li><code>missing</code> - Only suggest terms in the suggest text
* that aren't in the index. This is the default.
* <li><code>popular</code> - Only suggest terms that occur in more docs
* than the original suggest text term.
* <li><code>always</code> - Suggest any matching suggest terms based on
* tokens in the suggest text.
* </ol>
*/
public DirectCandidateGenerator suggestMode(String suggestMode) {
this.suggestMode = suggestMode;
return this;
}
/**
* Sets how similar, at a minimum, the suggested terms need to be to
* the original suggest text tokens. A value between 0 and 1 can be
* specified. This value will be compared to the string distance result
* of each candidate spelling correction.
* <p>
* Default is <tt>0.5</tt>
*/
public DirectCandidateGenerator accuracy(float accuracy) {
this.accuracy = accuracy;
return this;
}
/**
* Sets the maximum suggestions to be returned per suggest text term.
*/
public DirectCandidateGenerator size(int size) {
if (size <= 0) {
throw new IllegalArgumentException("Size must be positive");
}
this.size = size;
return this;
}
/**
* Sets how to sort the suggest terms per suggest text token. Two
* possible values:
* <ol>
* <li><code>score</code> - Sort should first be based on score, then
* document frequency and then the term itself.
* <li><code>frequency</code> - Sort should first be based on document
* frequency, then score and then the term itself.
* </ol>
* <p>
* What the score is depends on the suggester being used.
*/
public DirectCandidateGenerator sort(String sort) {
this.sort = sort;
return this;
}
/**
* Sets what string distance implementation to use for comparing how
* similar suggested terms are. Four possible values can be specified:
* <ol>
* <li><code>internal</code> - This is the default and is based on
* <code>damerau_levenshtein</code>, but highly optimized for comparing
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm
* based on Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
* the Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
* n-grams.
* </ol>
*/
public DirectCandidateGenerator stringDistance(String stringDistance) {
this.stringDistance = stringDistance;
return this;
}
/**
* Sets the maximum edit distance candidate suggestions can have in
* order to be considered as a suggestion. Can only be a value between 1
* and 2. Any other value results in a bad request error being thrown.
* Defaults to <tt>2</tt>.
*/
public DirectCandidateGenerator maxEdits(Integer maxEdits) {
this.maxEdits = maxEdits;
return this;
}
/**
* A factor that is used to multiply with the size in order to inspect
* more candidate suggestions. Can improve accuracy at the cost of
* performance. Defaults to <tt>5</tt>.
*/
public DirectCandidateGenerator maxInspections(Integer maxInspections) {
this.maxInspections = maxInspections;
return this;
}
/**
* Sets a maximum threshold in number of documents a suggest text token
* can exist in order to be corrected. Can be a relative percentage
* number (e.g. 0.4) or an absolute number to represent document
* frequencies. If a value higher than 1 is specified, then fractional
* values cannot be specified. Defaults to <tt>0.01</tt>.
* <p>
* This can be used to exclude high frequency terms from being
* suggested. High frequency terms are usually spelled correctly; on top
* of that, this also improves the suggest performance.
*/
public DirectCandidateGenerator maxTermFreq(float maxTermFreq) {
this.maxTermFreq = maxTermFreq;
return this;
}
/**
* Sets the minimal number of prefix characters that must match in order
* to be a candidate suggestion. Defaults to 1. Increasing this number
* improves suggest performance. Usually misspellings don't occur in the
* beginning of terms.
*/
public DirectCandidateGenerator prefixLength(int prefixLength) {
this.prefixLength = prefixLength;
return this;
}
/**
* The minimum length a suggest text term must have in order to be
* corrected. Defaults to <tt>4</tt>.
*/
public DirectCandidateGenerator minWordLength(int minWordLength) {
this.minWordLength = minWordLength;
return this;
}
/**
* Sets a minimal threshold in number of documents a suggested term
* should appear in. This can be specified as an absolute number or as a
* relative percentage of number of documents. This can improve quality
* by only suggesting high frequency terms. Defaults to 0f and is not
* enabled. If a value higher than 1 is specified then the number cannot
* be fractional.
*/
public DirectCandidateGenerator minDocFreq(float minDocFreq) {
this.minDocFreq = minDocFreq;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
* This filter is applied to the original token before candidates are generated.
*/
public DirectCandidateGenerator preFilter(String preFilter) {
this.preFilter = preFilter;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the generated tokens
* before they are passed to the actual phrase scorer.
*/
public DirectCandidateGenerator postFilter(String postFilter) {
this.postFilter = postFilter;
return this;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (field != null) {
builder.field("field", field);
}
if (suggestMode != null) {
builder.field("suggest_mode", suggestMode);
}
if (accuracy != null) {
builder.field("accuracy", accuracy);
}
if (size != null) {
builder.field("size", size);
}
if (sort != null) {
builder.field("sort", sort);
}
if (stringDistance != null) {
builder.field("string_distance", stringDistance);
}
if (maxEdits != null) {
builder.field("max_edits", maxEdits);
}
if (maxInspections != null) {
builder.field("max_inspections", maxInspections);
}
if (maxTermFreq != null) {
builder.field("max_term_freq", maxTermFreq);
}
if (prefixLength != null) {
builder.field("prefix_length", prefixLength);
}
if (minWordLength != null) {
builder.field("min_word_length", minWordLength);
}
if (minDocFreq != null) {
builder.field("min_doc_freq", minDocFreq);
}
if (preFilter != null) {
builder.field("pre_filter", preFilter);
}
if (postFilter != null) {
builder.field("post_filter", postFilter);
}
builder.endObject();
return builder;
}
}
}

@@ -21,13 +21,21 @@ package org.elasticsearch.transport;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import java.util.List;
+import static java.util.Collections.emptyList;
 /**
  * a collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security
  * This class should only contain static code which is *safe* to load before the security manager is enforced.
  */
 final public class TransportSettings {
+    public static final Setting<List<String>> HOST = Setting.listSetting("transport.host", emptyList(), s -> s, false, Setting.Scope.CLUSTER);
+    public static final Setting<List<String>> PUBLISH_HOST = Setting.listSetting("transport.publish_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
+    public static final Setting<List<String>> BIND_HOST = Setting.listSetting("transport.bind_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
     public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER);
+    public static final Setting<Integer> PUBLISH_PORT = Setting.intSetting("transport.publish_port", -1, -1, false, Setting.Scope.CLUSTER);
     public static final String DEFAULT_PROFILE = "default";
     public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
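The new host settings form a fallback chain: `transport.publish_host` and `transport.bind_host` both fall back to `transport.host` when unset. A hedged sketch of the resolution (the builder values are illustrative, and the fallback behavior is assumed from the `listSetting(key, fallbackSetting, ...)` overload used above):

    Settings s = Settings.settingsBuilder()
            .putArray("transport.host", "10.0.0.1")
            .build();
    // transport.bind_host and transport.publish_host are not set explicitly,
    // so both resolve through the transport.host fallback:
    List<String> bindHosts = TransportSettings.BIND_HOST.get(s);       // ["10.0.0.1"]
    List<String> publishHosts = TransportSettings.PUBLISH_HOST.get(s); // ["10.0.0.1"]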

@@ -119,12 +119,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE;
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException;
 import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
@@ -158,8 +152,16 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
     public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER);
     // the scheduled internal ping interval setting, defaults to disabled (-1)
     public static final Setting<TimeValue> PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER);
-    public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport." + TcpSettings.TCP_BLOCKING_CLIENT.getKey(), TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
-    public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport." + TcpSettings.TCP_CONNECT_TIMEOUT.getKey(), TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, false, Setting.Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
+    public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
     public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
     public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER);
@@ -179,7 +181,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
     public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
     public static final Setting<Integer> NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER);
-    public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
     protected final NetworkService networkService;
     protected final Version version;
@ -284,7 +285,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
boolean success = false; boolean success = false;
try { try {
clientBootstrap = createClientBootstrap(); clientBootstrap = createClientBootstrap();
if (NETWORK_SERVER.get(settings)) { if (NetworkService.NETWORK_SERVER.get(settings)) {
final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger); final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);
this.serverOpenChannels = openChannels; this.serverOpenChannels = openChannels;
@@ -356,25 +357,25 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory());
clientBootstrap.setOption("connectTimeoutMillis", connectTimeout.millis());
- boolean tcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
+ boolean tcpNoDelay = TCP_NO_DELAY.get(settings);
clientBootstrap.setOption("tcpNoDelay", tcpNoDelay);
- boolean tcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
+ boolean tcpKeepAlive = TCP_KEEP_ALIVE.get(settings);
clientBootstrap.setOption("keepAlive", tcpKeepAlive);
- ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
+ ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
if (tcpSendBufferSize.bytes() > 0) {
clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes());
}
- ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
+ ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
if (tcpReceiveBufferSize.bytes() > 0) {
clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes());
}
clientBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
- boolean reuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
+ boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
clientBootstrap.setOption("reuseAddress", reuseAddress);
return clientBootstrap;
@@ -383,31 +384,31 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
private Settings createFallbackSettings() {
Settings.Builder fallbackSettingsBuilder = settingsBuilder();
- String fallbackBindHost = settings.get("transport.netty.bind_host", settings.get("transport.bind_host", settings.get("transport.host")));
+ List<String> fallbackBindHost = TransportSettings.BIND_HOST.get(settings);
- if (fallbackBindHost != null) {
+ if (fallbackBindHost.isEmpty() == false) {
- fallbackSettingsBuilder.put("bind_host", fallbackBindHost);
+ fallbackSettingsBuilder.putArray("bind_host", fallbackBindHost);
}
- String fallbackPublishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
+ List<String> fallbackPublishHost = TransportSettings.PUBLISH_HOST.get(settings);
- if (fallbackPublishHost != null) {
+ if (fallbackPublishHost.isEmpty() == false) {
- fallbackSettingsBuilder.put("publish_host", fallbackPublishHost);
+ fallbackSettingsBuilder.putArray("publish_host", fallbackPublishHost);
}
- boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
+ boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TcpSettings.TCP_NO_DELAY.get(settings));
fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay);
- boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
+ boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TcpSettings.TCP_KEEP_ALIVE.get(settings));
fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive);
- boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
+ boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings));
fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
- ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
+ ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings));
if (fallbackTcpSendBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
- ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
+ ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (fallbackTcpBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
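In createFallbackSettings the old code chained three settings.get(...) calls, each string key falling back to the next; the new code reads one list-valued TransportSettings constant and treats an empty list as "unset". A hedged before/after sketch of just the lookup logic, with plain maps standing in for the real Settings class:

import java.util.List;
import java.util.Map;

public class FallbackHostDemo {
    // Old style: three string lookups, each falling back to the next key.
    static String oldBindHost(Map<String, String> s) {
        String v = s.get("transport.netty.bind_host");
        if (v == null) v = s.get("transport.bind_host");
        if (v == null) v = s.get("transport.host");
        return v;
    }

    // New style: one list-valued setting; "unset" is an empty list, not null.
    static List<String> newBindHost(Map<String, List<String>> s) {
        return s.getOrDefault("transport.bind_host", List.of());
    }

    public static void main(String[] args) {
        System.out.println(oldBindHost(Map.of("transport.host", "10.0.0.1"))); // 10.0.0.1

        List<String> hosts = newBindHost(Map.of());
        if (hosts.isEmpty() == false) {            // mirrors the diff's empty check
            System.out.println("binding to " + hosts);
        } else {
            System.out.println("no explicit bind host configured");
        }
    }
}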
@@ -495,7 +496,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
final String[] publishHosts;
if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
- publishHosts = settings.getAsArray("transport.netty.publish_host", settings.getAsArray("transport.publish_host", settings.getAsArray("transport.host", null)));
+ publishHosts = TransportSettings.PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
publishHosts = profileSettings.getAsArray("publish_host", boundAddressesHostStrings);
}
@@ -507,15 +508,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
throw new BindTransportException("Failed to resolve publish address", e);
}
- Integer publishPort;
+ int publishPort;
if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
- publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", null));
+ publishPort = TransportSettings.PUBLISH_PORT.get(settings);
} else {
- publishPort = profileSettings.getAsInt("publish_port", null);
+ publishPort = profileSettings.getAsInt("publish_port", -1);
}
// if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress
- if (publishPort == null) {
+ if (publishPort < 0) {
for (InetSocketAddress boundAddress : boundAddresses) {
InetAddress boundInetAddress = boundAddress.getAddress();
if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
@@ -526,7 +527,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
// if port still not matches, just take port of first bound address
- if (publishPort == null) {
+ if (publishPort < 0) {
// TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
// In case of a custom profile, we might use the publish address of the default profile
publishPort = boundAddresses.get(0).getPort();
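The two publishPort hunks replace a nullable Integer with a primitive int and a -1 sentinel, keeping the same resolution order: explicit setting first, then the port of a bound address matching the publish address, then the port of the first bound address. A compact runnable sketch of that logic (class and method names here are illustrative, not the real NettyTransport API):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;

public class PublishPortDemo {
    static int resolvePublishPort(int configuredPort, InetAddress publishAddress,
                                  List<InetSocketAddress> boundAddresses) {
        int publishPort = configuredPort; // -1 means "not explicitly set"
        if (publishPort < 0) {
            // prefer the port of a bound address that matches the publish address
            for (InetSocketAddress bound : boundAddresses) {
                InetAddress addr = bound.getAddress();
                if (addr.isAnyLocalAddress() || addr.equals(publishAddress)) {
                    publishPort = bound.getPort();
                    break;
                }
            }
        }
        if (publishPort < 0) {
            // last resort: port of the first bound address
            publishPort = boundAddresses.get(0).getPort();
        }
        return publishPort;
    }

    public static void main(String[] args) {
        List<InetSocketAddress> bound = List.of(new InetSocketAddress("0.0.0.0", 9300));
        System.out.println(resolvePublishPort(-1, InetAddress.getLoopbackAddress(), bound)); // 9300
    }
}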
@@ -538,15 +539,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
private void createServerBootstrap(String name, Settings settings) {
- boolean blockingServer = settings.getAsBoolean("transport.tcp.blocking_server", TCP_BLOCKING_SERVER.get(settings));
+ boolean blockingServer = TCP_BLOCKING_SERVER.get(settings);
String port = settings.get("port");
String bindHost = settings.get("bind_host");
String publishHost = settings.get("publish_host");
String tcpNoDelay = settings.get("tcp_no_delay");
String tcpKeepAlive = settings.get("tcp_keep_alive");
boolean reuseAddress = settings.getAsBoolean("reuse_address", NetworkUtils.defaultReuseAddress());
- ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.getDefault(settings));
+ ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
- ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.getDefault(settings));
+ ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax);

@@ -42,6 +42,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
+ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.DiscoveryModule;
@@ -51,12 +52,14 @@ import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;
import org.elasticsearch.rest.RestStatus;
+ import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
+ import java.util.function.Function;
import static java.util.Collections.unmodifiableMap;
@@ -84,12 +87,12 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
public static Settings processSettings(Settings settings) {
- if (settings.get(TRIBE_NAME) != null) {
+ if (TRIBE_NAME_SETTING.exists(settings)) {
// if its a node client started by this service as tribe, remove any tribe group setting
// to avoid recursive configuration
Settings.Builder sb = Settings.builder().put(settings);
for (String s : settings.getAsMap().keySet()) {
- if (s.startsWith("tribe.") && !s.equals(TRIBE_NAME)) {
+ if (s.startsWith("tribe.") && !s.equals(TRIBE_NAME_SETTING.getKey())) {
sb.remove(s);
}
}
@@ -111,14 +114,26 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
return sb.build();
}
- public static final String TRIBE_NAME = "tribe.name";
+ private static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); // internal settings only
private final ClusterService clusterService;
private final String[] blockIndicesWrite;
private final String[] blockIndicesRead;
private final String[] blockIndicesMetadata;
private static final String ON_CONFLICT_ANY = "any", ON_CONFLICT_DROP = "drop", ON_CONFLICT_PREFER = "prefer_";
+ public static final Setting<String> ON_CONFLICT_SETTING = new Setting<>("tribe.on_conflict", ON_CONFLICT_ANY, (s) -> {
+     if (ON_CONFLICT_ANY.equals(s) || ON_CONFLICT_DROP.equals(s) || s.startsWith(ON_CONFLICT_PREFER)) {
+         return s;
+     }
+     throw new IllegalArgumentException("Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " + s);
+ }, false, Setting.Scope.CLUSTER);
+ public static final Setting<Boolean> BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, Setting.Scope.CLUSTER);
+ public static final Setting<Boolean> BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, Setting.Scope.CLUSTER);
+ public static final Setting<List<String>> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
+ public static final Setting<List<String>> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
+ public static final Setting<List<String>> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
private final String onConflict;
private final Set<String> droppedIndices = ConcurrentCollections.newConcurrentSet();
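ON_CONFLICT_SETTING is worth pausing on: its third constructor argument is a parser that validates the value when the setting is read, so a bad tribe.on_conflict value fails fast instead of being silently accepted. A standalone sketch of just that validator, with the Setting machinery reduced to a plain function:

import java.util.function.Function;

public class OnConflictDemo {
    // Mirrors the validator in the diff: accept "any", "drop", or "prefer_<tribe>".
    static final Function<String, String> ON_CONFLICT_PARSER = s -> {
        if ("any".equals(s) || "drop".equals(s) || s.startsWith("prefer_")) {
            return s;
        }
        throw new IllegalArgumentException(
            "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " + s);
    };

    public static void main(String[] args) {
        System.out.println(ON_CONFLICT_PARSER.apply("prefer_t1")); // accepted
        try {
            ON_CONFLICT_PARSER.apply("merge");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // rejected at parse time, not at use time
        }
    }
}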
@@ -138,7 +153,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (Environment.PATH_CONF_SETTING.exists(settings)) {
sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(settings));
}
- sb.put(TRIBE_NAME, entry.getKey());
+ sb.put(TRIBE_NAME_SETTING.getKey(), entry.getKey());
if (sb.get("http.enabled") == null) {
sb.put("http.enabled", false);
}
@@ -154,15 +169,15 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
// master elected in this single tribe node local "cluster"
clusterService.removeInitialStateBlock(discoveryService.getNoMasterBlock());
clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
- if (settings.getAsBoolean("tribe.blocks.write", false)) {
+ if (BLOCKS_WRITE_SETTING.get(settings)) {
clusterService.addInitialStateBlock(TRIBE_WRITE_BLOCK);
}
- blockIndicesWrite = settings.getAsArray("tribe.blocks.write.indices", Strings.EMPTY_ARRAY);
+ blockIndicesWrite = BLOCKS_WRITE_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
- if (settings.getAsBoolean("tribe.blocks.metadata", false)) {
+ if (BLOCKS_METADATA_SETTING.get(settings)) {
clusterService.addInitialStateBlock(TRIBE_METADATA_BLOCK);
}
- blockIndicesMetadata = settings.getAsArray("tribe.blocks.metadata.indices", Strings.EMPTY_ARRAY);
+ blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
- blockIndicesRead = settings.getAsArray("tribe.blocks.read.indices", Strings.EMPTY_ARRAY);
+ blockIndicesRead = BLOCKS_READ_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
for (Node node : nodes) {
node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
}
@@ -171,7 +186,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
this.blockIndicesRead = blockIndicesRead;
this.blockIndicesWrite = blockIndicesWrite;
- this.onConflict = settings.get("tribe.on_conflict", ON_CONFLICT_ANY);
+ this.onConflict = ON_CONFLICT_SETTING.get(settings);
}
@Override
@@ -218,7 +233,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
private final TribeNodeClusterStateTaskExecutor executor;
TribeClusterStateListener(Node tribeNode) {
- String tribeName = tribeNode.settings().get(TRIBE_NAME);
+ String tribeName = TRIBE_NAME_SETTING.get(tribeNode.settings());
this.tribeName = tribeName;
executor = new TribeNodeClusterStateTaskExecutor(tribeName);
}
@@ -271,7 +286,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
// -- merge nodes
// go over existing nodes, and see if they need to be removed
for (DiscoveryNode discoNode : currentState.nodes()) {
- String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
+ String markedTribeName = discoNode.attributes().get(TRIBE_NAME_SETTING.getKey());
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (tribeState.nodes().get(discoNode.id()) == null) {
clusterStateChanged = true;
@@ -288,7 +303,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
for (ObjectObjectCursor<String, String> attr : tribe.attributes()) {
tribeAttr.put(attr.key, attr.value);
}
- tribeAttr.put(TRIBE_NAME, tribeName);
+ tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);
@@ -302,18 +317,18 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
// go over existing indices, and see if they need to be removed
for (IndexMetaData index : currentState.metaData()) {
- String markedTribeName = index.getSettings().get(TRIBE_NAME);
+ String markedTribeName = TRIBE_NAME_SETTING.get(index.getSettings());
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
clusterStateChanged = true;
if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
- logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
+ logger.info("[{}] removing index {}", tribeName, index.getIndex());
removeIndex(blocks, metaData, routingTable, index);
} else {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.getIndex()));
- Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
+ Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
}
@@ -327,14 +342,14 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
}
final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
if (indexMetaData == null) {
- if (!droppedIndices.contains(tribeIndex.getIndex())) {
+ if (!droppedIndices.contains(tribeIndex.getIndex().getName())) {
// a new index, add it, and add the tribe name as a setting
clusterStateChanged = true;
- logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
+ logger.info("[{}] adding index {}", tribeName, tribeIndex.getIndex());
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
}
} else {
- String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
+ String existingFromTribe = TRIBE_NAME_SETTING.get(indexMetaData.getSettings());
if (!tribeName.equals(existingFromTribe)) {
// we have a potential conflict on index names, decide what to do...
if (ON_CONFLICT_ANY.equals(onConflict)) {
@@ -342,7 +357,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
} else if (ON_CONFLICT_DROP.equals(onConflict)) {
// drop the indices, there is a conflict
clusterStateChanged = true;
- logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
+ logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
droppedIndices.add(tribeIndex.getIndex().getName());
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
@@ -351,7 +366,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (tribeName.equals(preferredTribeName)) {
// the new one is the preferred one, replace...
clusterStateChanged = true;
- logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
+ logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} // else: either the existing one is the preferred one, or we haven't seen one, carry on
@@ -374,7 +389,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
}
private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
- Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
+ Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));
if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex().getName())) {
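A pattern runs through all of these TribeService hunks: each node and index absorbed from a tribe cluster is stamped with the tribe's name (formerly via the raw TRIBE_NAME string, now via TRIBE_NAME_SETTING.getKey()), and later merge passes filter on that marker. A reduced sketch of the marking-and-filtering idea, with plain maps standing in for DiscoveryNode attributes (everything below is illustrative, not the real TribeService types):

import java.util.List;
import java.util.Map;

public class TribeMarkerDemo {
    static final String TRIBE_NAME_KEY = "tribe.name";

    public static void main(String[] args) {
        // Each map stands in for one node's attributes.
        List<Map<String, String>> nodes = List.of(
            Map.of(TRIBE_NAME_KEY, "t1", "name", "node-a"),
            Map.of(TRIBE_NAME_KEY, "t2", "name", "node-b"),
            Map.of("name", "local-node")); // no marker: not owned by any tribe

        String tribeName = "t1";
        for (Map<String, String> attrs : nodes) {
            String marked = attrs.get(TRIBE_NAME_KEY);
            if (marked != null && marked.equals(tribeName)) {
                // only nodes previously stamped with this tribe's name are merged or removed
                System.out.println("managed by " + tribeName + ": " + attrs.get("name"));
            }
        }
    }
}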

@@ -286,12 +286,12 @@ public class ESExceptionTests extends ESTestCase {
public void testSerializeUnknownException() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
ParsingException ParsingException = new ParsingException(1, 2, "foobar", null);
- Throwable ex = new Throwable("wtf", ParsingException);
+ Throwable ex = new Throwable("eggplant", ParsingException);
out.writeThrowable(ex);
StreamInput in = StreamInput.wrap(out.bytes());
Throwable throwable = in.readThrowable();
- assertEquals("wtf", throwable.getMessage());
+ assertEquals("throwable: eggplant", throwable.getMessage());
assertTrue(throwable instanceof ElasticsearchException);
ParsingException e = (ParsingException)throwable.getCause();
assertEquals(ParsingException.getIndex(), e.getIndex());
@@ -329,7 +329,9 @@ public class ESExceptionTests extends ESTestCase {
StreamInput in = StreamInput.wrap(out.bytes());
ElasticsearchException e = in.readThrowable();
assertEquals(e.getMessage(), ex.getMessage());
- assertEquals(ex.getCause().getClass().getName(), e.getCause().getMessage(), ex.getCause().getMessage());
+ assertTrue("Expected: " + e.getCause().getMessage() + " to contain: " +
+         ex.getCause().getClass().getName() + " but it didn't",
+         e.getCause().getMessage().contains(ex.getCause().getMessage()));
if (ex.getCause().getClass() != Throwable.class) { // throwable is not directly mapped
assertEquals(e.getCause().getClass(), ex.getCause().getClass());
} else {

@@ -543,9 +543,9 @@ public class ExceptionSerializationTests extends ESTestCase {
public void testNotSerializableExceptionWrapper() throws IOException {
NotSerializableExceptionWrapper ex = serialize(new NotSerializableExceptionWrapper(new NullPointerException()));
- assertEquals("{\"type\":\"null_pointer_exception\",\"reason\":null}", toXContent(ex));
+ assertEquals("{\"type\":\"null_pointer_exception\",\"reason\":\"null_pointer_exception: null\"}", toXContent(ex));
ex = serialize(new NotSerializableExceptionWrapper(new IllegalArgumentException("nono!")));
- assertEquals("{\"type\":\"illegal_argument_exception\",\"reason\":\"nono!\"}", toXContent(ex));
+ assertEquals("{\"type\":\"illegal_argument_exception\",\"reason\":\"illegal_argument_exception: nono!\"}", toXContent(ex));
Throwable[] unknowns = new Throwable[]{
new Exception("foobar"),
@@ -586,7 +586,7 @@ public class ExceptionSerializationTests extends ESTestCase {
ElasticsearchException serialize = serialize((ElasticsearchException) uhe);
assertTrue(serialize instanceof NotSerializableExceptionWrapper);
NotSerializableExceptionWrapper e = (NotSerializableExceptionWrapper) serialize;
- assertEquals("msg", e.getMessage());
+ assertEquals("unknown_header_exception: msg", e.getMessage());
assertEquals(2, e.getHeader("foo").size());
assertEquals("foo", e.getHeader("foo").get(0));
assertEquals("bar", e.getHeader("foo").get(1));

@@ -119,7 +119,7 @@ public class HotThreadsIT extends ESIntegTestCase {
.setQuery(matchAllQuery())
.setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2"))))
.get(),
- 3l);
+ 3L);
}
latch.await();
assertThat(hasErrors.get(), is(false));
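This hunk, and many of the test hunks below it, are the same mechanical cleanup: lowercase long-literal suffixes (3l) become uppercase (3L), because a lowercase l is easily misread as the digit 1. A two-line illustration:

public class LongSuffixDemo {
    public static void main(String[] args) {
        long a = 3l;  // legal, but the suffix reads like "31" in many fonts
        long b = 3L;  // same value, unambiguous
        System.out.println(a == b); // true
    }
}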

@@ -93,7 +93,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
ensureYellow();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
- assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L));
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
@@ -104,7 +104,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
refresh(); // make the doc visible
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
- assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
+ assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L));
assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
@@ -141,10 +141,10 @@ public class ClusterStatsIT extends ESIntegTestCase {
ensureYellow("test1");
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
String msg = response.toString();
- assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
+ assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000
- assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
+ assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L));
- assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
+ assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0L));
assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0));

@@ -54,7 +54,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
public class CreateIndexIT extends ESIntegTestCase {
public void testCreationDateGivenFails() {
try {
- prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4l)).get();
+ prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get();
fail();
} catch (IllegalArgumentException ex) {
assertEquals("unknown setting [index.creation_date]", ex.getMessage());

@@ -93,7 +93,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
assertThat(shardStores.values().size(), equalTo(2));
for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) {
for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) {
- assertThat(storeStatus.getVersion(), greaterThan(-1l));
+ assertThat(storeStatus.getVersion(), greaterThan(-1L));
assertThat(storeStatus.getAllocationId(), notNullValue());
assertThat(storeStatus.getNode(), notNullValue());
assertThat(storeStatus.getStoreException(), nullValue());
@@ -191,10 +191,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
if (corruptedShardIDMap.containsKey(shardStatus.key)
&& corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
- assertThat(status.getVersion(), greaterThanOrEqualTo(0l));
+ assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertThat(status.getStoreException(), notNullValue());
} else {
- assertThat(status.getVersion(), greaterThanOrEqualTo(0l));
+ assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertNull(status.getStoreException());
}
}

@@ -66,11 +66,11 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
SegmentsStats stats = rsp.getIndex("test").getTotal().getSegments();
- assertThat(stats.getTermsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L));
- assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L));
- assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0L));
- assertThat(stats.getNormsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L));
- assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L));
// now check multiple segments stats are merged together
client().prepareIndex("test", "doc", "2").setSource("foo", "bar").get();
@@ -93,7 +93,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
for (ShardStats shardStats : rsp.getIndex("test").getShards()) {
final CommitStats commitStats = shardStats.getCommitStats();
assertNotNull(commitStats);
- assertThat(commitStats.getGeneration(), greaterThan(0l));
+ assertThat(commitStats.getGeneration(), greaterThan(0L));
assertThat(commitStats.getId(), notNullValue());
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));

@@ -0,0 +1,61 @@
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied. See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+
+ package org.elasticsearch.action.ingest;
+
+ import org.elasticsearch.common.io.stream.BytesStreamOutput;
+ import org.elasticsearch.common.io.stream.StreamInput;
+ import org.elasticsearch.ingest.core.PipelineFactoryError;
+ import org.elasticsearch.test.ESTestCase;
+
+ import java.io.IOException;
+
+ import static org.hamcrest.CoreMatchers.equalTo;
+ import static org.hamcrest.CoreMatchers.nullValue;
+
+ public class WritePipelineResponseTests extends ESTestCase {
+
+     public void testSerializationWithoutError() throws IOException {
+         boolean isAcknowledged = randomBoolean();
+         WritePipelineResponse response;
+         response = new WritePipelineResponse(isAcknowledged);
+         BytesStreamOutput out = new BytesStreamOutput();
+         response.writeTo(out);
+         StreamInput streamInput = StreamInput.wrap(out.bytes());
+         WritePipelineResponse otherResponse = new WritePipelineResponse();
+         otherResponse.readFrom(streamInput);
+
+         assertThat(otherResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
+     }
+
+     public void testSerializationWithError() throws IOException {
+         PipelineFactoryError error = new PipelineFactoryError("error");
+         WritePipelineResponse response = new WritePipelineResponse(error);
+         BytesStreamOutput out = new BytesStreamOutput();
+         response.writeTo(out);
+         StreamInput streamInput = StreamInput.wrap(out.bytes());
+         WritePipelineResponse otherResponse = new WritePipelineResponse();
+         otherResponse.readFrom(streamInput);
+
+         assertThat(otherResponse.getError().getReason(), equalTo(response.getError().getReason()));
+         assertThat(otherResponse.getError().getProcessorType(), equalTo(response.getError().getProcessorType()));
+         assertThat(otherResponse.getError().getProcessorTag(), equalTo(response.getError().getProcessorTag()));
+         assertThat(otherResponse.getError().getProcessorPropertyName(), equalTo(response.getError().getProcessorPropertyName()));
+     }
+ }

@@ -99,7 +99,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
assertThat(terms.getDocCount(), Matchers.equalTo(-1));
assertThat(terms.getSumDocFreq(), equalTo((long) -1));
@@ -158,7 +158,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
@@ -214,7 +214,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));

@@ -317,7 +317,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
if (ft.storeTermVectors()) {
Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.size(), equalTo(8L));
TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
@@ -637,7 +637,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
Terms terms = fields.terms(fieldName);
- assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.size(), equalTo(8L));
TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
@@ -1087,12 +1087,12 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
- assertThat(response.getVersion(), equalTo(1l));
+ assertThat(response.getVersion(), equalTo(1L));
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
- assertThat(response.getVersion(), equalTo(1l));
+ assertThat(response.getVersion(), equalTo(1L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get();
@@ -1109,13 +1109,13 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(1l));
+ assertThat(response.getVersion(), equalTo(1L));
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(1l));
+ assertThat(response.getVersion(), equalTo(1L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
@@ -1134,7 +1134,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(2l));
+ assertThat(response.getVersion(), equalTo(2L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get();
@@ -1147,7 +1147,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(2l));
+ assertThat(response.getVersion(), equalTo(2L));
// From Lucene index:
refresh();
@@ -1157,7 +1157,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(2l));
+ assertThat(response.getVersion(), equalTo(2L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
@@ -1170,7 +1170,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
- assertThat(response.getVersion(), equalTo(2l));
+ assertThat(response.getVersion(), equalTo(2L));
}
public void testFilterLength() throws ExecutionException, InterruptedException, IOException {

@@ -58,7 +58,7 @@ public class BroadcastActionsIT extends ESIntegTestCase {
SearchResponse countResponse = client().prepareSearch("test").setSize(0)
.setQuery(termQuery("_type", "type1"))
.get();
- assertThat(countResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(countResponse.getHits().totalHits(), equalTo(2L));
assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));

@ -122,11 +122,11 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true)); assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get(); GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true)); assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(1l)); assertThat(get.getVersion(), equalTo(1L));
client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet(); client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get(); get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true)); assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(2l)); assertThat(get.getVersion(), equalTo(2L));
} }
assertVersionCreated(compatibilityVersion(), "test"); assertVersionCreated(compatibilityVersion(), "test");
@@ -416,30 +416,30 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
                     client().prepareIndex(indexName, "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
 
             SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field1")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(existsQuery("field1"))).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_exists_:field1")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field2")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field3")).get();
-            assertHitCount(countResponse, 1l);
+            assertHitCount(countResponse, 1L);
 
             // wildcard check
             countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("x*")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             // object check
             countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get();
-            assertHitCount(countResponse, 2l);
+            assertHitCount(countResponse, 2L);
 
             if (!backwardsCluster().upgradeOneNode()) {
                 break;
@@ -598,7 +598,7 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
         assertThat(termVectorsResponse.isExists(), equalTo(true));
         Fields fields = termVectorsResponse.getFields();
         assertThat(fields.size(), equalTo(1));
-        assertThat(fields.terms("field").size(), equalTo(8l));
+        assertThat(fields.terms("field").size(), equalTo(8L));
     }
 
     public void testIndicesStats() {

@@ -332,7 +332,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
             }
         }
         SearchResponse test = client().prepareSearch(indexName).get();
-        assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
+        assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
     }
 
     void assertBasicSearchWorks(String indexName) {

@@ -49,7 +49,7 @@ public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase {
         assertEquals(index, getIndexResponse.indices()[0]);
         ensureYellow(index);
         SearchResponse test = client().prepareSearch(index).get();
-        assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
+        assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
     }
 }

@@ -32,6 +32,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
+
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -48,7 +50,7 @@ public class TransportClientIT extends ESIntegTestCase {
     }
 
-    public void testNodeVersionIsUpdated() {
+    public void testNodeVersionIsUpdated() throws IOException {
         TransportClient client = (TransportClient) internalCluster().client();
         TransportClientNodesService nodeService = client.nodeService();
         Node node = new Node(Settings.builder()

@@ -632,7 +632,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
         controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
         for (PendingClusterTask task : response) {
             if (controlSources.remove(task.getSource().string())) {
-                assertThat(task.getTimeInQueueInMillis(), greaterThan(0l));
+                assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
             }
         }
         assertTrue(controlSources.isEmpty());

@@ -115,8 +115,8 @@ public class DiskUsageTests extends ESTestCase {
         assertEquals(2, shardSizes.size());
         assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0)));
         assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1)));
-        assertEquals(100l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_0)).longValue());
-        assertEquals(1000l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_1)).longValue());
+        assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_0)).longValue());
+        assertEquals(1000L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_1)).longValue());
 
         assertEquals(2, routingToPath.size());
         assertTrue(routingToPath.containsKey(test_0));

Some files were not shown because too many files have changed in this diff.