Merge branch 'master' into trash_context_and_headers
This commit is contained in: commit 71c3e57aee
@@ -112,9 +112,6 @@ public class PluginBuildPlugin extends BuildPlugin {
             include 'config/**'
             include 'bin/**'
         }
-        from('src/site') {
-            include '_site/**'
-        }
     }
     project.assemble.dependsOn(bundle)
@@ -36,15 +36,9 @@ class PluginPropertiesExtension {
     @Input
     String description

-    @Input
-    boolean jvm = true
-
     @Input
     String classname

-    @Input
-    boolean site = false
-
     @Input
     boolean isolated = true

@@ -51,11 +51,11 @@ class PluginPropertiesTask extends Copy {
         if (extension.description == null) {
             throw new InvalidUserDataException('description is a required setting for esplugin')
         }
-        if (extension.jvm && extension.classname == null) {
-            throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
+        if (extension.classname == null) {
+            throw new InvalidUserDataException('classname is a required setting for esplugin')
         }
         doFirst {
-            if (extension.jvm && extension.isolated == false) {
+            if (extension.isolated == false) {
                 String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
                 logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
             }
@@ -74,10 +74,8 @@ class PluginPropertiesTask extends Copy {
             'version': extension.version,
             'elasticsearchVersion': VersionProperties.elasticsearch,
             'javaVersion': project.targetCompatibility as String,
-            'jvm': extension.jvm as String,
-            'site': extension.site as String,
             'isolated': extension.isolated as String,
-            'classname': extension.jvm ? extension.classname : 'NA'
+            'classname': extension.classname
         ]
     }
 }
@@ -129,7 +129,7 @@ class NodeInfo {
             'JAVA_HOME' : project.javaHome,
             'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
         ]
-        args.add("-Des.tests.portsfile=true")
+        args.add("-Des.node.portsfile=true")
         args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
         for (Map.Entry<String, String> property : System.properties.entrySet()) {
             if (property.getKey().startsWith('es.')) {
@@ -2,26 +2,13 @@
 # This file must exist as 'plugin-descriptor.properties' at
 # the root directory of all plugins.
 #
-# A plugin can be 'site', 'jvm', or both.
-#
-### example site plugin for "foo":
-#
-# foo.zip <-- zip file for the plugin, with this structure:
-#   _site/ <-- the contents that will be served
-#   plugin-descriptor.properties <-- example contents below:
-#
-# site=true
-# description=My cool plugin
-# version=1.0
-#
-### example jvm plugin for "foo"
+### example plugin for "foo"
 #
 # foo.zip <-- zip file for the plugin, with this structure:
 #   <arbitrary name1>.jar <-- classes, resources, dependencies
 #   <arbitrary nameN>.jar <-- any number of jars
 #   plugin-descriptor.properties <-- example contents below:
 #
-# jvm=true
 # classname=foo.bar.BazPlugin
 # description=My cool plugin
 # version=2.0
@@ -38,21 +25,6 @@ version=${version}
 #
 # 'name': the plugin name
 name=${name}

-### mandatory elements for site plugins:
-#
-# 'site': set to true to indicate contents of the _site/
-#  directory in the root of the plugin should be served.
-site=${site}
-#
-### mandatory elements for jvm plugins :
-#
-# 'jvm': true if the 'classname' class should be loaded
-# from jar files in the root directory of the plugin.
-# Note that only jar files in the root directory are
-# added to the classpath for the plugin! If you need
-# other resources, package them into a resources jar.
-jvm=${jvm}
-#
 # 'classname': the name of the class to load, fully-qualified.
 classname=${classname}
@@ -149,6 +149,16 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.get.TransportGetIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.put.TransportPutIndexedScriptAction;
+import org.elasticsearch.action.ingest.IngestActionFilter;
+import org.elasticsearch.action.ingest.IngestProxyActionFilter;
+import org.elasticsearch.action.ingest.DeletePipelineAction;
+import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
+import org.elasticsearch.action.ingest.GetPipelineAction;
+import org.elasticsearch.action.ingest.GetPipelineTransportAction;
+import org.elasticsearch.action.ingest.PutPipelineAction;
+import org.elasticsearch.action.ingest.PutPipelineTransportAction;
+import org.elasticsearch.action.ingest.SimulatePipelineAction;
+import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
 import org.elasticsearch.action.percolate.MultiPercolateAction;
 import org.elasticsearch.action.percolate.PercolateAction;
 import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
@@ -186,6 +196,8 @@ import org.elasticsearch.action.update.UpdateAction;
 import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.MapBinder;
 import org.elasticsearch.common.inject.multibindings.Multibinder;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.NodeModule;

 import java.util.ArrayList;
 import java.util.HashMap;
@@ -210,13 +222,13 @@ public class ActionModule extends AbstractModule {
             this.transportAction = transportAction;
             this.supportTransportActions = supportTransportActions;
         }

     }

+    private final boolean ingestEnabled;
     private final boolean proxy;

-    public ActionModule(boolean proxy) {
+    public ActionModule(boolean ingestEnabled, boolean proxy) {
+        this.ingestEnabled = ingestEnabled;
         this.proxy = proxy;
     }

@@ -240,6 +252,13 @@ public class ActionModule extends AbstractModule {

     @Override
     protected void configure() {
+        if (proxy == false) {
+            if (ingestEnabled) {
+                registerFilter(IngestActionFilter.class);
+            } else {
+                registerFilter(IngestProxyActionFilter.class);
+            }
+        }
+
         Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
         for (Class<? extends ActionFilter> actionFilter : actionFilters) {
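Note: a node that is not a transport proxy registers exactly one of the two new filters: nodes with ingest enabled get IngestActionFilter (runs pipelines locally), all other nodes get IngestProxyActionFilter (forwards to an ingest node). A minimal wiring sketch; the "node.ingest" setting name below is an assumption for illustration and is not part of this diff:

    // Sketch only: derive the flag from node settings and pass it to the module.
    boolean ingestEnabled = settings.getAsBoolean("node.ingest", true); // assumed setting name
    modules.add(new ActionModule(ingestEnabled, false));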
@@ -340,6 +359,11 @@ public class ActionModule extends AbstractModule {

         registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class);

+        registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
+        registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
+        registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
+        registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);
+
         // register Name -> GenericAction Map that can be injected to instances.
         MapBinder<String, GenericAction> actionsBinder
             = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
@@ -45,7 +45,7 @@ import static org.elasticsearch.common.Strings.hasLength;
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
 import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

 /**
  * Create snapshot request
@@ -379,14 +379,14 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
                     throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                 }
             } else if (name.equals("partial")) {
-                partial(nodeBooleanValue(entry.getValue()));
+                partial(lenientNodeBooleanValue(entry.getValue()));
             } else if (name.equals("settings")) {
                 if (!(entry.getValue() instanceof Map)) {
                     throw new IllegalArgumentException("malformed settings section, should indices an inner object");
                 }
                 settings((Map<String, Object>) entry.getValue());
             } else if (name.equals("include_global_state")) {
-                includeGlobalState = nodeBooleanValue(entry.getValue());
+                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
             }
         }
         indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
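Note: switching these flags to lenientNodeBooleanValue preserves the old forgiving parsing for snapshot/restore request bodies. A sketch of the assumed difference in semantics (the exact rule set is an assumption here, not spelled out in this diff):

    // Illustrative behaviour, not the literal implementation:
    lenientNodeBooleanValue("off");  // -> false ("false", "0", "no", "off" treated as false)
    lenientNodeBooleanValue("yes");  // -> true  (anything else treated as true)
    nodeBooleanValue("yes");         // strict variant: only canonical booleans accepted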
@@ -43,7 +43,7 @@ import static org.elasticsearch.common.Strings.hasLength;
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
 import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

 /**
  * Restore snapshot request
@@ -498,16 +498,16 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
                     throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                 }
             } else if (name.equals("partial")) {
-                partial(nodeBooleanValue(entry.getValue()));
+                partial(lenientNodeBooleanValue(entry.getValue()));
             } else if (name.equals("settings")) {
                 if (!(entry.getValue() instanceof Map)) {
                     throw new IllegalArgumentException("malformed settings section");
                 }
                 settings((Map<String, Object>) entry.getValue());
             } else if (name.equals("include_global_state")) {
-                includeGlobalState = nodeBooleanValue(entry.getValue());
+                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
             } else if (name.equals("include_aliases")) {
-                includeAliases = nodeBooleanValue(entry.getValue());
+                includeAliases = lenientNodeBooleanValue(entry.getValue());
             } else if (name.equals("rename_pattern")) {
                 if (entry.getValue() instanceof String) {
                     renamePattern((String) entry.getValue());
@@ -289,11 +289,11 @@ public class BulkProcessor implements Closeable {
     }

     public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
-        return add(data, defaultIndex, defaultType, null);
+        return add(data, defaultIndex, defaultType, null, null);
     }

-    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable Object payload) throws Exception {
-        bulkRequest.add(data, defaultIndex, defaultType, null, null, payload, true);
+    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
+        bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
         executeIfNeeded();
         return this;
     }
@@ -28,6 +28,7 @@ import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -245,17 +246,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
      * Adds a framed data in binary format
      */
     public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
-        return add(data, defaultIndex, defaultType, null, null, null, true);
+        return add(data, defaultIndex, defaultType, null, null, null, null, true);
     }

     /**
      * Adds a framed data in binary format
      */
     public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
-        return add(data, defaultIndex, defaultType, null, null, null, allowExplicitIndex);
+        return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
     }

-    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
+    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
         XContent xContent = XContentFactory.xContent(data);
         int line = 0;
         int from = 0;
@@ -296,6 +297,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                 long version = Versions.MATCH_ANY;
                 VersionType versionType = VersionType.INTERNAL;
                 int retryOnConflict = 0;
+                String pipeline = defaultPipeline;

                 // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
                 // or START_OBJECT which will have another set of parameters
@@ -336,6 +338,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                             versionType = VersionType.fromString(parser.text());
                         } else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
                             retryOnConflict = parser.intValue();
+                        } else if ("pipeline".equals(currentFieldName)) {
+                            pipeline = parser.text();
                         } else if ("fields".equals(currentFieldName)) {
                             throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
                         } else {
@@ -372,15 +376,15 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                 if ("index".equals(action)) {
                     if (opType == null) {
                         internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                                .source(data.slice(from, nextMarker - from)), payload);
+                                .setPipeline(pipeline).source(data.slice(from, nextMarker - from)), payload);
                     } else {
                         internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                                .create("create".equals(opType))
+                                .create("create".equals(opType)).setPipeline(pipeline)
                                 .source(data.slice(from, nextMarker - from)), payload);
                     }
                 } else if ("create".equals(action)) {
                     internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
-                            .create(true)
+                            .create(true).setPipeline(pipeline)
                             .source(data.slice(from, nextMarker - from)), payload);
                 } else if ("update".equals(action)) {
                     UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict)
@@ -471,6 +475,22 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
         return -1;
     }

+    /**
+     * @return Whether this bulk request contains index request with an ingest pipeline enabled.
+     */
+    public boolean hasIndexRequestsWithPipelines() {
+        for (ActionRequest actionRequest : requests) {
+            if (actionRequest instanceof IndexRequest) {
+                IndexRequest indexRequest = (IndexRequest) actionRequest;
+                if (Strings.hasText(indexRequest.getPipeline())) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;
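Note: this helper gives callers (such as the ingest filters later in this diff) a cheap fast path: ingest handling is skipped entirely unless at least one item in the bulk names a pipeline. A hedged usage sketch (index, type, and pipeline ids made up):

    BulkRequest bulk = new BulkRequest();
    bulk.add(new IndexRequest("logs", "event", "1")
            .setPipeline("my-pipeline")
            .source("{\"message\":\"hello\"}"));
    assert bulk.hasIndexRequestsWithPipelines(); // at least one item wants ingest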
@@ -154,6 +154,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do

     private XContentType contentType = Requests.INDEX_CONTENT_TYPE;

+    private String pipeline;
+
     public IndexRequest() {
     }

@@ -354,6 +356,21 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         return this.ttl;
     }

+    /**
+     * Sets the ingest pipeline to be executed before indexing the document
+     */
+    public IndexRequest setPipeline(String pipeline) {
+        this.pipeline = pipeline;
+        return this;
+    }
+
+    /**
+     * Returns the ingest pipeline to be executed before indexing the document
+     */
+    public String getPipeline() {
+        return this.pipeline;
+    }
+
     /**
      * The source of the document to index, recopied to a new array if it is unsafe.
      */
@@ -649,6 +666,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         refresh = in.readBoolean();
         version = in.readLong();
         versionType = VersionType.fromValue(in.readByte());
+        pipeline = in.readOptionalString();
     }

     @Override
@@ -670,6 +688,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         out.writeBoolean(refresh);
         out.writeLong(version);
         out.writeByte(versionType.getValue());
+        out.writeOptionalString(pipeline);
     }

     @Override
@@ -278,4 +278,12 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
         request.ttl(ttl);
         return this;
     }
+
+    /**
+     * Sets the ingest pipeline to be executed before indexing the document
+     */
+    public IndexRequestBuilder setPipeline(String pipeline) {
+        request.setPipeline(pipeline);
+        return this;
+    }
 }
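Note: together with IndexRequest#setPipeline above, this exposes the pipeline on the client builder API. A minimal usage sketch (index, type, id, pipeline, and field names made up):

    client.prepareIndex("logs", "event", "1")
            .setPipeline("my-pipeline")
            .setSource("message", "hello")
            .get();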
@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class DeletePipelineAction extends Action<DeletePipelineRequest, WritePipelineResponse, DeletePipelineRequestBuilder> {

    public static final DeletePipelineAction INSTANCE = new DeletePipelineAction();
    public static final String NAME = "cluster:admin/ingest/pipeline/delete";

    public DeletePipelineAction() {
        super(NAME);
    }

    @Override
    public DeletePipelineRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new DeletePipelineRequestBuilder(client, this);
    }

    @Override
    public WritePipelineResponse newResponse() {
        return new WritePipelineResponse();
    }
}
@@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public class DeletePipelineRequest extends AcknowledgedRequest<DeletePipelineRequest> {

    private String id;

    public DeletePipelineRequest(String id) {
        if (id == null) {
            throw new IllegalArgumentException("id is missing");
        }
        this.id = id;
    }

    DeletePipelineRequest() {
    }

    public void setId(String id) {
        this.id = Objects.requireNonNull(id);
    }

    public String getId() {
        return id;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        id = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(id);
    }
}
@@ -0,0 +1,35 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class DeletePipelineRequestBuilder extends ActionRequestBuilder<DeletePipelineRequest, WritePipelineResponse, DeletePipelineRequestBuilder> {

    public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action) {
        super(client, action, new DeletePipelineRequest());
    }

    public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action, String id) {
        super(client, action, new DeletePipelineRequest(id));
    }

}
@@ -0,0 +1,71 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class DeletePipelineTransportAction extends TransportMasterNodeAction<DeletePipelineRequest, WritePipelineResponse> {

    private final PipelineStore pipelineStore;
    private final ClusterService clusterService;

    @Inject
    public DeletePipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                         TransportService transportService, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
        super(settings, DeletePipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeletePipelineRequest::new);
        this.clusterService = clusterService;
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.SAME;
    }

    @Override
    protected WritePipelineResponse newResponse() {
        return new WritePipelineResponse();
    }

    @Override
    protected void masterOperation(DeletePipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
        pipelineStore.delete(clusterService, request, listener);
    }

    @Override
    protected ClusterBlockException checkBlock(DeletePipelineRequest request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

}
@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class GetPipelineAction extends Action<GetPipelineRequest, GetPipelineResponse, GetPipelineRequestBuilder> {

    public static final GetPipelineAction INSTANCE = new GetPipelineAction();
    public static final String NAME = "cluster:admin/ingest/pipeline/get";

    public GetPipelineAction() {
        super(NAME);
    }

    @Override
    public GetPipelineRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new GetPipelineRequestBuilder(client, this);
    }

    @Override
    public GetPipelineResponse newResponse() {
        return new GetPipelineResponse();
    }
}
@@ -0,0 +1,66 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public class GetPipelineRequest extends MasterNodeReadRequest<GetPipelineRequest> {

    private String[] ids;

    public GetPipelineRequest(String... ids) {
        if (ids == null || ids.length == 0) {
            throw new IllegalArgumentException("No ids specified");
        }
        this.ids = ids;
    }

    GetPipelineRequest() {
    }

    public String[] getIds() {
        return ids;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        ids = in.readStringArray();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeStringArray(ids);
    }
}
@@ -0,0 +1,35 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetPipelineRequest, GetPipelineResponse, GetPipelineRequestBuilder> {

    public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action) {
        super(client, action, new GetPipelineRequest());
    }

    public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action, String[] ids) {
        super(client, action, new GetPipelineRequest(ids));
    }

}
@@ -0,0 +1,86 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.ingest.PipelineConfiguration;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class GetPipelineResponse extends ActionResponse implements StatusToXContent {

    private List<PipelineConfiguration> pipelines;

    public GetPipelineResponse() {
    }

    public GetPipelineResponse(List<PipelineConfiguration> pipelines) {
        this.pipelines = pipelines;
    }

    public List<PipelineConfiguration> pipelines() {
        return pipelines;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        pipelines = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            pipelines.add(PipelineConfiguration.readPipelineConfiguration(in));
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(pipelines.size());
        for (PipelineConfiguration pipeline : pipelines) {
            pipeline.writeTo(out);
        }
    }

    public boolean isFound() {
        return !pipelines.isEmpty();
    }

    @Override
    public RestStatus status() {
        return isFound() ? RestStatus.OK : RestStatus.NOT_FOUND;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray("pipelines");
        for (PipelineConfiguration pipeline : pipelines) {
            pipeline.toXContent(builder, params);
        }
        builder.endArray();
        return builder;
    }
}
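Note: isFound()/status() map an empty result list to HTTP 404, so a REST layer serializing this response needs no extra not-found logic. Illustrative shape of the XContent output (the REST endpoint and exact payload are assumptions; only toXContent above is part of this hunk):

    { "pipelines": [ { ...pipeline configuration... } ] }   -> 200 OK
    { "pipelines": [] }                                     -> 404 NOT_FOUND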
@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class GetPipelineTransportAction extends TransportMasterNodeReadAction<GetPipelineRequest, GetPipelineResponse> {

    private final PipelineStore pipelineStore;

    @Inject
    public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
        super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetPipelineRequest::new);
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.SAME;
    }

    @Override
    protected GetPipelineResponse newResponse() {
        return new GetPipelineResponse();
    }

    @Override
    protected void masterOperation(GetPipelineRequest request, ClusterState state, ActionListener<GetPipelineResponse> listener) throws Exception {
        listener.onResponse(new GetPipelineResponse(pipelineStore.getPipelines(state, request.getIds())));
    }

    @Override
    protected ClusterBlockException checkBlock(GetPipelineRequest request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
    }

}
@@ -0,0 +1,225 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineExecutionService;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.tasks.Task;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

public final class IngestActionFilter extends AbstractComponent implements ActionFilter {

    private final PipelineExecutionService executionService;

    @Inject
    public IngestActionFilter(Settings settings, NodeService nodeService) {
        super(settings);
        this.executionService = nodeService.getIngestService().getPipelineExecutionService();
    }

    @Override
    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
        switch (action) {
            case IndexAction.NAME:
                IndexRequest indexRequest = (IndexRequest) request;
                if (Strings.hasText(indexRequest.getPipeline())) {
                    processIndexRequest(task, action, listener, chain, (IndexRequest) request);
                } else {
                    chain.proceed(task, action, request, listener);
                }
                break;
            case BulkAction.NAME:
                BulkRequest bulkRequest = (BulkRequest) request;
                if (bulkRequest.hasIndexRequestsWithPipelines()) {
                    @SuppressWarnings("unchecked")
                    ActionListener<BulkResponse> actionListener = (ActionListener<BulkResponse>) listener;
                    processBulkIndexRequest(task, bulkRequest, action, chain, actionListener);
                } else {
                    chain.proceed(task, action, request, listener);
                }
                break;
            default:
                chain.proceed(task, action, request, listener);
                break;
        }
    }

    @Override
    public <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener, ActionFilterChain<?, Response> chain) {
        chain.proceed(action, response, listener);
    }

    void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {

        executionService.execute(indexRequest, t -> {
            logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
            listener.onFailure(t);
        }, success -> {
            // TransportIndexAction uses IndexRequest and same action name on the node that receives the request and the node that
            // processes the primary action. This could lead to a pipeline being executed twice for the same
            // index request, hence we set the pipeline to null once its execution completed.
            indexRequest.setPipeline(null);
            chain.proceed(task, action, indexRequest, listener);
        });
    }

    void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
        BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
        executionService.execute(() -> bulkRequestModifier, (indexRequest, throwable) -> {
            logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id(), throwable);
            bulkRequestModifier.markCurrentItemAsFailed(throwable);
        }, (throwable) -> {
            if (throwable != null) {
                logger.error("failed to execute pipeline for a bulk request", throwable);
                listener.onFailure(throwable);
            } else {
                BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
                ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(listener);
                if (bulkRequest.requests().isEmpty()) {
                    // at this stage, the transport bulk action can't deal with a bulk request with no requests,
                    // so we stop and send an empty response back to the client.
                    // (this will happen if pre-processing all items in the bulk failed)
                    actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0));
                } else {
                    chain.proceed(task, action, bulkRequest, actionListener);
                }
            }
        });
    }

    @Override
    public int order() {
        return Integer.MAX_VALUE;
    }

    final static class BulkRequestModifier implements Iterator<ActionRequest<?>> {

        final BulkRequest bulkRequest;
        final Set<Integer> failedSlots;
        final List<BulkItemResponse> itemResponses;

        int currentSlot = -1;
        int[] originalSlots;

        BulkRequestModifier(BulkRequest bulkRequest) {
            this.bulkRequest = bulkRequest;
            this.failedSlots = new HashSet<>();
            this.itemResponses = new ArrayList<>(bulkRequest.requests().size());
        }

        @Override
        public ActionRequest next() {
            return bulkRequest.requests().get(++currentSlot);
        }

        @Override
        public boolean hasNext() {
            return (currentSlot + 1) < bulkRequest.requests().size();
        }

        BulkRequest getBulkRequest() {
            if (itemResponses.isEmpty()) {
                return bulkRequest;
            } else {
                BulkRequest modifiedBulkRequest = new BulkRequest(bulkRequest);
                modifiedBulkRequest.refresh(bulkRequest.refresh());
                modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel());
                modifiedBulkRequest.timeout(bulkRequest.timeout());

                int slot = 0;
                originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()];
                for (int i = 0; i < bulkRequest.requests().size(); i++) {
                    ActionRequest request = bulkRequest.requests().get(i);
                    if (failedSlots.contains(i) == false) {
                        modifiedBulkRequest.add(request);
                        originalSlots[slot++] = i;
                    }
                }
                return modifiedBulkRequest;
            }
        }

        ActionListener<BulkResponse> wrapActionListenerIfNeeded(ActionListener<BulkResponse> actionListener) {
            if (itemResponses.isEmpty()) {
                return actionListener;
            } else {
                return new IngestBulkResponseListener(originalSlots, itemResponses, actionListener);
            }
        }

        void markCurrentItemAsFailed(Throwable e) {
            IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(currentSlot);
            // We hit an error during preprocessing a request, so we:
            // 1) Remember the request item slot from the bulk, so that when we're done processing all requests we know what failed
            // 2) Add a bulk item failure for this request
            // 3) Continue with the next request in the bulk.
            failedSlots.add(currentSlot);
            BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e);
            itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType().lowercase(), failure));
        }

    }

    private final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {

        private final int[] originalSlots;
        private final List<BulkItemResponse> itemResponses;
        private final ActionListener<BulkResponse> actionListener;

        IngestBulkResponseListener(int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
            this.itemResponses = itemResponses;
            this.actionListener = actionListener;
            this.originalSlots = originalSlots;
        }

        @Override
        public void onResponse(BulkResponse bulkItemResponses) {
            for (int i = 0; i < bulkItemResponses.getItems().length; i++) {
                itemResponses.add(originalSlots[i], bulkItemResponses.getItems()[i]);
            }
            actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), bulkItemResponses.getTookInMillis()));
        }

        @Override
        public void onFailure(Throwable e) {
            actionListener.onFailure(e);
        }
    }
}
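Note: BulkRequestModifier's slot bookkeeping is what keeps item responses aligned with the client's original bulk. A worked example of the intended behaviour (values made up for illustration):

    // A 3-item bulk where item 1 fails pre-processing:
    //   markCurrentItemAsFailed(..) records failedSlots = {1} and queues a
    //   failure response for slot 1.
    //   getBulkRequest() then builds a 2-item bulk of the surviving items
    //   {0, 2}, with originalSlots = [0, 2].
    //   When the 2-item BulkResponse arrives, IngestBulkResponseListener
    //   re-inserts each item at its original slot, so the client still sees
    //   3 item responses in the order it sent them.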
@@ -0,0 +1,125 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.ingest;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.atomic.AtomicInteger;

public final class IngestProxyActionFilter implements ActionFilter {

    private final ClusterService clusterService;
    private final TransportService transportService;
    private final AtomicInteger randomNodeGenerator = new AtomicInteger(Randomness.get().nextInt());

    @Inject
    public IngestProxyActionFilter(ClusterService clusterService, TransportService transportService) {
        this.clusterService = clusterService;
        this.transportService = transportService;
    }

    @Override
    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
        Action ingestAction;
        switch (action) {
            case IndexAction.NAME:
                ingestAction = IndexAction.INSTANCE;
                IndexRequest indexRequest = (IndexRequest) request;
                if (Strings.hasText(indexRequest.getPipeline())) {
                    forwardIngestRequest(ingestAction, request, listener);
                } else {
                    chain.proceed(task, action, request, listener);
                }
                break;
            case BulkAction.NAME:
                ingestAction = BulkAction.INSTANCE;
                BulkRequest bulkRequest = (BulkRequest) request;
                if (bulkRequest.hasIndexRequestsWithPipelines()) {
                    forwardIngestRequest(ingestAction, request, listener);
                } else {
                    chain.proceed(task, action, request, listener);
                }
                break;
            default:
                chain.proceed(task, action, request, listener);
                break;
        }
    }

    @SuppressWarnings("unchecked")
    private void forwardIngestRequest(Action<?, ?, ?> action, ActionRequest request, ActionListener<?> listener) {
        transportService.sendRequest(randomIngestNode(), action.name(), request, new ActionListenerResponseHandler(listener) {
            @Override
            public TransportResponse newInstance() {
                return action.newResponse();
            }

        });
    }

    @Override
    public <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener, ActionFilterChain<?, Response> chain) {
        chain.proceed(action, response, listener);
    }

    @Override
    public int order() {
        return Integer.MAX_VALUE;
    }

    private DiscoveryNode randomIngestNode() {
        assert clusterService.localNode().isIngestNode() == false;
        DiscoveryNodes nodes = clusterService.state().getNodes();
        DiscoveryNode[] ingestNodes = nodes.getIngestNodes().values().toArray(DiscoveryNode.class);
        if (ingestNodes.length == 0) {
            throw new IllegalStateException("There are no ingest nodes in this cluster, unable to forward request to an ingest node.");
        }

        int index = getNodeNumber();
        return ingestNodes[(index) % ingestNodes.length];
    }

    private int getNodeNumber() {
        int index = randomNodeGenerator.incrementAndGet();
        if (index < 0) {
            index = 0;
            randomNodeGenerator.set(0);
        }
        return index;
    }
}
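Note: randomIngestNode() spreads forwarded requests across ingest nodes with a counter-modulo scheme; getNodeNumber() resets the counter when incrementAndGet() overflows into negative values so the modulo index never goes negative. A condensed sketch of the same selection logic (variable names are illustrative):

    int index = counter.incrementAndGet();
    if (index < 0) {          // wrapped past Integer.MAX_VALUE
        index = 0;
        counter.set(0);
    }
    DiscoveryNode target = ingestNodes[index % ingestNodes.length];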
@@ -0,0 +1,44 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class PutPipelineAction extends Action<PutPipelineRequest, WritePipelineResponse, PutPipelineRequestBuilder> {

    public static final PutPipelineAction INSTANCE = new PutPipelineAction();
    public static final String NAME = "cluster:admin/ingest/pipeline/put";

    public PutPipelineAction() {
        super(NAME);
    }

    @Override
    public PutPipelineRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new PutPipelineRequestBuilder(client, this);
    }

    @Override
    public WritePipelineResponse newResponse() {
        return new WritePipelineResponse();
    }
}
@@ -0,0 +1,79 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> {

    private String id;
    private BytesReference source;

    public PutPipelineRequest(String id, BytesReference source) {
        if (id == null) {
            throw new IllegalArgumentException("id is missing");
        }
        if (source == null) {
            throw new IllegalArgumentException("source is missing");
        }

        this.id = id;
        this.source = source;
    }

    PutPipelineRequest() {
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    public String getId() {
        return id;
    }

    public BytesReference getSource() {
        return source;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        id = in.readString();
        source = in.readBytesReference();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(id);
        out.writeBytesReference(source);
    }
}

@@ -0,0 +1,36 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;

public class PutPipelineRequestBuilder extends ActionRequestBuilder<PutPipelineRequest, WritePipelineResponse, PutPipelineRequestBuilder> {

    public PutPipelineRequestBuilder(ElasticsearchClient client, PutPipelineAction action) {
        super(client, action, new PutPipelineRequest());
    }

    public PutPipelineRequestBuilder(ElasticsearchClient client, PutPipelineAction action, String id, BytesReference source) {
        super(client, action, new PutPipelineRequest(id, source));
    }

}

@@ -0,0 +1,71 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {

    private final PipelineStore pipelineStore;
    private final ClusterService clusterService;

    @Inject
    public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
        super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
        this.clusterService = clusterService;
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.SAME;
    }

    @Override
    protected WritePipelineResponse newResponse() {
        return new WritePipelineResponse();
    }

    @Override
    protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
        pipelineStore.put(clusterService, request, listener);
    }

    @Override
    protected ClusterBlockException checkBlock(PutPipelineRequest request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

}

@@ -0,0 +1,98 @@
/* Apache License, Version 2.0 header (identical to the header above) */
package org.elasticsearch.action.ingest;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.ingest.core.IngestDocument;

import java.io.IOException;
import java.util.Collections;

/**
 * Holds the end result of what a pipeline did to a sample document provided via the simulate api.
 */
public final class SimulateDocumentBaseResult implements SimulateDocumentResult<SimulateDocumentBaseResult> {

    private static final SimulateDocumentBaseResult PROTOTYPE = new SimulateDocumentBaseResult(new WriteableIngestDocument(new IngestDocument(Collections.emptyMap(), Collections.emptyMap())));

    private WriteableIngestDocument ingestDocument;
    private Exception failure;

    public SimulateDocumentBaseResult(IngestDocument ingestDocument) {
        this.ingestDocument = new WriteableIngestDocument(ingestDocument);
    }

    private SimulateDocumentBaseResult(WriteableIngestDocument ingestDocument) {
        this.ingestDocument = ingestDocument;
    }

    public SimulateDocumentBaseResult(Exception failure) {
        this.failure = failure;
    }

    public IngestDocument getIngestDocument() {
        if (ingestDocument == null) {
            return null;
        }
        return ingestDocument.getIngestDocument();
    }

    public Exception getFailure() {
        return failure;
    }

    public static SimulateDocumentBaseResult readSimulateDocumentSimpleResult(StreamInput in) throws IOException {
        return PROTOTYPE.readFrom(in);
    }

    @Override
    public SimulateDocumentBaseResult readFrom(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            Exception exception = in.readThrowable();
            return new SimulateDocumentBaseResult(exception);
        }
        return new SimulateDocumentBaseResult(new WriteableIngestDocument(in));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (failure == null) {
            out.writeBoolean(false);
            ingestDocument.writeTo(out);
        } else {
            out.writeBoolean(true);
            out.writeThrowable(failure);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        if (failure == null) {
            ingestDocument.toXContent(builder, params);
        } else {
            ElasticsearchException.renderThrowable(builder, params, failure);
        }
        builder.endObject();
        return builder;
    }
}

@@ -0,0 +1,26 @@
/* Apache License, Version 2.0 header (identical to the header above) */
package org.elasticsearch.action.ingest;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;

public interface SimulateDocumentResult<T extends SimulateDocumentResult> extends Writeable<T>, ToXContent {

}

@@ -0,0 +1,81 @@
/* Apache License, Version 2.0 header (identical to the header above) */
package org.elasticsearch.action.ingest;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Holds the result of what a pipeline did to a sample document via the simulate api, but unlike
 * {@link SimulateDocumentBaseResult}, this result class holds the intermediate result each processor
 * produced for the sample document.
 */
public final class SimulateDocumentVerboseResult implements SimulateDocumentResult<SimulateDocumentVerboseResult> {

    private static final SimulateDocumentVerboseResult PROTOTYPE = new SimulateDocumentVerboseResult(Collections.emptyList());

    private final List<SimulateProcessorResult> processorResults;

    public SimulateDocumentVerboseResult(List<SimulateProcessorResult> processorResults) {
        this.processorResults = processorResults;
    }

    public List<SimulateProcessorResult> getProcessorResults() {
        return processorResults;
    }

    public static SimulateDocumentVerboseResult readSimulateDocumentVerboseResultFrom(StreamInput in) throws IOException {
        return PROTOTYPE.readFrom(in);
    }

    @Override
    public SimulateDocumentVerboseResult readFrom(StreamInput in) throws IOException {
        int size = in.readVInt();
        List<SimulateProcessorResult> processorResults = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            processorResults.add(new SimulateProcessorResult(in));
        }
        return new SimulateDocumentVerboseResult(processorResults);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(processorResults.size());
        for (SimulateProcessorResult result : processorResults) {
            result.writeTo(out);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.startArray("processor_results");
        for (SimulateProcessorResult processorResult : processorResults) {
            processorResult.toXContent(builder, params);
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }
}

@@ -0,0 +1,99 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;

class SimulateExecutionService {

    private static final String THREAD_POOL_NAME = ThreadPool.Names.MANAGEMENT;

    private final ThreadPool threadPool;

    SimulateExecutionService(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    void executeVerboseDocument(Processor processor, IngestDocument ingestDocument, List<SimulateProcessorResult> processorResultList) throws Exception {
        if (processor instanceof CompoundProcessor) {
            CompoundProcessor cp = (CompoundProcessor) processor;
            try {
                for (Processor p : cp.getProcessors()) {
                    executeVerboseDocument(p, ingestDocument, processorResultList);
                }
            } catch (Exception e) {
                for (Processor p : cp.getOnFailureProcessors()) {
                    executeVerboseDocument(p, ingestDocument, processorResultList);
                }
            }
        } else {
            try {
                processor.execute(ingestDocument);
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument)));
            } catch (Exception e) {
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), e));
                throw e;
            }
        }
    }

    SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
        if (verbose) {
            List<SimulateProcessorResult> processorResultList = new ArrayList<>();
            IngestDocument currentIngestDocument = new IngestDocument(ingestDocument);
            CompoundProcessor pipelineProcessor = new CompoundProcessor(pipeline.getProcessors(), pipeline.getOnFailureProcessors());
            try {
                executeVerboseDocument(pipelineProcessor, currentIngestDocument, processorResultList);
            } catch (Exception e) {
                return new SimulateDocumentBaseResult(e);
            }
            return new SimulateDocumentVerboseResult(processorResultList);
        } else {
            try {
                pipeline.execute(ingestDocument);
                return new SimulateDocumentBaseResult(ingestDocument);
            } catch (Exception e) {
                return new SimulateDocumentBaseResult(e);
            }
        }
    }

    public void execute(SimulatePipelineRequest.Parsed request, ActionListener<SimulatePipelineResponse> listener) {
        threadPool.executor(THREAD_POOL_NAME).execute(new ActionRunnable<SimulatePipelineResponse>(listener) {
            @Override
            protected void doRun() throws Exception {
                List<SimulateDocumentResult> responses = new ArrayList<>();
                for (IngestDocument ingestDocument : request.getDocuments()) {
                    responses.add(executeDocument(request.getPipeline(), ingestDocument, request.isVerbose()));
                }
                listener.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses));
            }
        });
    }
}

@@ -0,0 +1,43 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

public class SimulatePipelineAction extends Action<SimulatePipelineRequest, SimulatePipelineResponse, SimulatePipelineRequestBuilder> {

    public static final SimulatePipelineAction INSTANCE = new SimulatePipelineAction();
    public static final String NAME = "cluster:admin/ingest/pipeline/simulate";

    public SimulatePipelineAction() {
        super(NAME);
    }

    @Override
    public SimulatePipelineRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new SimulatePipelineRequestBuilder(client, this);
    }

    @Override
    public SimulatePipelineResponse newResponse() {
        return new SimulatePipelineResponse();
    }
}

@@ -0,0 +1,165 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.PipelineStore;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.ingest.core.IngestDocument.MetaData;

public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {

    private String id;
    private boolean verbose;
    private BytesReference source;

    public SimulatePipelineRequest(BytesReference source) {
        if (source == null) {
            throw new IllegalArgumentException("source is missing");
        }
        this.source = source;
    }

    SimulatePipelineRequest() {
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public boolean isVerbose() {
        return verbose;
    }

    public void setVerbose(boolean verbose) {
        this.verbose = verbose;
    }

    public BytesReference getSource() {
        return source;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        id = in.readString();
        verbose = in.readBoolean();
        source = in.readBytesReference();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(id);
        out.writeBoolean(verbose);
        out.writeBytesReference(source);
    }

    public static final class Fields {
        static final String PIPELINE = "pipeline";
        static final String DOCS = "docs";
        static final String SOURCE = "_source";
    }

    static class Parsed {
        private final List<IngestDocument> documents;
        private final Pipeline pipeline;
        private final boolean verbose;

        Parsed(Pipeline pipeline, List<IngestDocument> documents, boolean verbose) {
            this.pipeline = pipeline;
            this.documents = Collections.unmodifiableList(documents);
            this.verbose = verbose;
        }

        public Pipeline getPipeline() {
            return pipeline;
        }

        public List<IngestDocument> getDocuments() {
            return documents;
        }

        public boolean isVerbose() {
            return verbose;
        }
    }

    private static final Pipeline.Factory PIPELINE_FACTORY = new Pipeline.Factory();
    static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline";

    static Parsed parseWithPipelineId(String pipelineId, Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) {
        if (pipelineId == null) {
            throw new IllegalArgumentException("param [pipeline] is null");
        }
        Pipeline pipeline = pipelineStore.get(pipelineId);
        List<IngestDocument> ingestDocumentList = parseDocs(config);
        return new Parsed(pipeline, ingestDocumentList, verbose);
    }

    static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
        Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(config, Fields.PIPELINE);
        Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactoryRegistry());
        List<IngestDocument> ingestDocumentList = parseDocs(config);
        return new Parsed(pipeline, ingestDocumentList, verbose);
    }

    private static List<IngestDocument> parseDocs(Map<String, Object> config) {
        List<Map<String, Object>> docs = ConfigurationUtils.readList(config, Fields.DOCS);
        List<IngestDocument> ingestDocumentList = new ArrayList<>();
        for (Map<String, Object> dataMap : docs) {
            Map<String, Object> document = ConfigurationUtils.readMap(dataMap, Fields.SOURCE);
            IngestDocument ingestDocument = new IngestDocument(ConfigurationUtils.readStringProperty(dataMap, MetaData.INDEX.getFieldName(), "_index"),
                    ConfigurationUtils.readStringProperty(dataMap, MetaData.TYPE.getFieldName(), "_type"),
                    ConfigurationUtils.readStringProperty(dataMap, MetaData.ID.getFieldName(), "_id"),
                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.ROUTING.getFieldName()),
                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.PARENT.getFieldName()),
                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.TIMESTAMP.getFieldName()),
                    ConfigurationUtils.readOptionalStringProperty(dataMap, MetaData.TTL.getFieldName()),
                    document);
            ingestDocumentList.add(ingestDocument);
        }
        return ingestDocumentList;
    }
}

@@ -0,0 +1,46 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;

public class SimulatePipelineRequestBuilder extends ActionRequestBuilder<SimulatePipelineRequest, SimulatePipelineResponse, SimulatePipelineRequestBuilder> {

    public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action) {
        super(client, action, new SimulatePipelineRequest());
    }

    public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action, BytesReference source) {
        super(client, action, new SimulatePipelineRequest(source));
    }

    public SimulatePipelineRequestBuilder setId(String id) {
        request.setId(id);
        return this;
    }

    public SimulatePipelineRequestBuilder setVerbose(boolean verbose) {
        request.setVerbose(verbose);
        return this;
    }

}

@@ -0,0 +1,103 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
    private String pipelineId;
    private boolean verbose;
    private List<SimulateDocumentResult> results;

    public SimulatePipelineResponse() {
    }

    public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
        this.pipelineId = pipelineId;
        this.verbose = verbose;
        this.results = Collections.unmodifiableList(responses);
    }

    public String getPipelineId() {
        return pipelineId;
    }

    public List<SimulateDocumentResult> getResults() {
        return results;
    }

    public boolean isVerbose() {
        return verbose;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(pipelineId);
        out.writeBoolean(verbose);
        out.writeVInt(results.size());
        for (SimulateDocumentResult response : results) {
            response.writeTo(out);
        }
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.pipelineId = in.readString();
        boolean verbose = in.readBoolean();
        int responsesLength = in.readVInt();
        results = new ArrayList<>();
        for (int i = 0; i < responsesLength; i++) {
            SimulateDocumentResult<?> simulateDocumentResult;
            if (verbose) {
                simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
            } else {
                simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
            }
            results.add(simulateDocumentResult);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray(Fields.DOCUMENTS);
        for (SimulateDocumentResult response : results) {
            response.toXContent(builder, params);
        }
        builder.endArray();
        return builder;
    }

    static final class Fields {
        static final XContentBuilderString DOCUMENTS = new XContentBuilderString("docs");
    }
}

@@ -0,0 +1,66 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Map;

public class SimulatePipelineTransportAction extends HandledTransportAction<SimulatePipelineRequest, SimulatePipelineResponse> {

    private final PipelineStore pipelineStore;
    private final SimulateExecutionService executionService;

    @Inject
    public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
        super(settings, SimulatePipelineAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SimulatePipelineRequest::new);
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
        this.executionService = new SimulateExecutionService(threadPool);
    }

    @Override
    protected void doExecute(SimulatePipelineRequest request, ActionListener<SimulatePipelineResponse> listener) {
        final Map<String, Object> source = XContentHelper.convertToMap(request.getSource(), false).v2();

        final SimulatePipelineRequest.Parsed simulateRequest;
        try {
            if (request.getId() != null) {
                simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), pipelineStore);
            } else {
                simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
            }
        } catch (Exception e) {
            listener.onFailure(e);
            return;
        }

        executionService.execute(simulateRequest, listener);
    }
}

@@ -0,0 +1,106 @@
/* Apache License, Version 2.0 header (identical to the header above) */
package org.elasticsearch.action.ingest;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.IngestDocument;

import java.io.IOException;

public class SimulateProcessorResult implements Writeable<SimulateProcessorResult>, ToXContent {
    private final String processorTag;
    private final WriteableIngestDocument ingestDocument;
    private final Exception failure;

    public SimulateProcessorResult(StreamInput in) throws IOException {
        this.processorTag = in.readString();
        if (in.readBoolean()) {
            this.failure = in.readThrowable();
            this.ingestDocument = null;
        } else {
            this.ingestDocument = new WriteableIngestDocument(in);
            this.failure = null;
        }
    }

    public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
        this.processorTag = processorTag;
        this.ingestDocument = new WriteableIngestDocument(ingestDocument);
        this.failure = null;
    }

    public SimulateProcessorResult(String processorTag, Exception failure) {
        this.processorTag = processorTag;
        this.failure = failure;
        this.ingestDocument = null;
    }

    public IngestDocument getIngestDocument() {
        if (ingestDocument == null) {
            return null;
        }
        return ingestDocument.getIngestDocument();
    }

    public String getProcessorTag() {
        return processorTag;
    }

    public Exception getFailure() {
        return failure;
    }

    @Override
    public SimulateProcessorResult readFrom(StreamInput in) throws IOException {
        return new SimulateProcessorResult(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(processorTag);
        if (failure == null) {
            out.writeBoolean(false);
            ingestDocument.writeTo(out);
        } else {
            out.writeBoolean(true);
            out.writeThrowable(failure);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        if (processorTag != null) {
            builder.field(AbstractProcessorFactory.TAG_KEY, processorTag);
        }
        if (failure == null) {
            ingestDocument.toXContent(builder, params);
        } else {
            ElasticsearchException.renderThrowable(builder, params, failure);
        }
        builder.endObject();
        return builder;
    }
}

@@ -0,0 +1,48 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class WritePipelineResponse extends AcknowledgedResponse {

    WritePipelineResponse() {
    }

    public WritePipelineResponse(boolean acknowledge) {
        super(acknowledge);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        readAcknowledged(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        writeAcknowledged(out);
    }
}

@@ -0,0 +1,105 @@
/* Apache License, Version 2.0 header (identical to the header above) */

package org.elasticsearch.action.ingest;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.core.IngestDocument;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

final class WriteableIngestDocument implements Writeable<WriteableIngestDocument>, ToXContent {

    private final IngestDocument ingestDocument;

    WriteableIngestDocument(IngestDocument ingestDocument) {
        assert ingestDocument != null;
        this.ingestDocument = ingestDocument;
    }

    WriteableIngestDocument(StreamInput in) throws IOException {
        Map<String, Object> sourceAndMetadata = in.readMap();
        @SuppressWarnings("unchecked")
        Map<String, String> ingestMetadata = (Map<String, String>) in.readGenericValue();
        this.ingestDocument = new IngestDocument(sourceAndMetadata, ingestMetadata);
    }

    IngestDocument getIngestDocument() {
        return ingestDocument;
    }

    @Override
    public WriteableIngestDocument readFrom(StreamInput in) throws IOException {
        return new WriteableIngestDocument(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeMap(ingestDocument.getSourceAndMetadata());
        out.writeGenericValue(ingestDocument.getIngestMetadata());
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("doc");
        Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
        for (Map.Entry<IngestDocument.MetaData, String> metadata : metadataMap.entrySet()) {
            builder.field(metadata.getKey().getFieldName(), metadata.getValue());
        }
        builder.field("_source", ingestDocument.getSourceAndMetadata());
        builder.startObject("_ingest");
        for (Map.Entry<String, String> ingestMetadata : ingestDocument.getIngestMetadata().entrySet()) {
            builder.field(ingestMetadata.getKey(), ingestMetadata.getValue());
        }
        builder.endObject();
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        WriteableIngestDocument that = (WriteableIngestDocument) o;
        return Objects.equals(ingestDocument, that.ingestDocument);
    }

    @Override
    public int hashCode() {
        return Objects.hash(ingestDocument);
    }

    @Override
    public String toString() {
        return ingestDocument.toString();
    }
}

@@ -28,6 +28,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.searchafter.SearchAfterBuilder;
 import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;

@@ -343,6 +344,15 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
         return this;
     }
 
+    /**
+     * Set the sort values that indicate which docs this request should "search after".
+     *
+     */
+    public SearchRequestBuilder searchAfter(Object[] values) {
+        sourceBuilder().searchAfter(values);
+        return this;
+    }
+
     /**
      * Applies when sorting, and controls if scores will be tracked as well. Defaults to
      * <tt>false</tt>.

@@ -391,27 +401,27 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
 
     /**
      * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
-     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
+     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
      *
      * @param rescorer rescorer configuration
      * @return this for chaining
      */
-    public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer) {
+    public SearchRequestBuilder setRescorer(RescoreBuilder<?> rescorer) {
         sourceBuilder().clearRescorers();
         return addRescorer(rescorer);
     }
 
     /**
      * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
-     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
+     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
      *
      * @param rescorer rescorer configuration
      * @param window rescore window
      * @return this for chaining
      */
-    public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer, int window) {
+    public SearchRequestBuilder setRescorer(RescoreBuilder rescorer, int window) {
         sourceBuilder().clearRescorers();
-        return addRescorer(rescorer, window);
+        return addRescorer(rescorer.windowSize(window));
     }
 
     /**

@@ -420,8 +430,8 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @param rescorer rescorer configuration
      * @return this for chaining
      */
-    public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer) {
-        sourceBuilder().addRescorer(new RescoreBuilder(rescorer));
+    public SearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer) {
+        sourceBuilder().addRescorer(rescorer);
         return this;
     }
 

@@ -432,8 +442,8 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @param window rescore window
      * @return this for chaining
      */
-    public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer, int window) {
-        sourceBuilder().addRescorer(new RescoreBuilder(rescorer).windowSize(window));
+    public SearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer, int window) {
+        sourceBuilder().addRescorer(rescorer.windowSize(window));
         return this;
     }
 

@@ -26,7 +26,7 @@ import org.elasticsearch.rest.RestRequest;
 import java.io.IOException;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
 
 /**

@@ -195,8 +195,8 @@ public class IndicesOptions {
 
         //note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use)
         return fromOptions(
-                nodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()),
-                nodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()),
+                lenientNodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()),
+                lenientNodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()),
                 expandWildcardsOpen,
                 expandWildcardsClosed,
                 defaultSettings.allowAliasesToMultipleIndices(),

@@ -241,26 +241,26 @@ final class Security {
      */
     static void addFilePermissions(Permissions policy, Environment environment) {
         // read-only dirs
-        addPath(policy, "path.home", environment.binFile(), "read,readlink");
-        addPath(policy, "path.home", environment.libFile(), "read,readlink");
-        addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
-        addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
-        addPath(policy, "path.conf", environment.configFile(), "read,readlink");
-        addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
+        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
+        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
+        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
+        addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
+        addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
+        addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink");
         // read-write dirs
         addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
-        addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
+        addPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete");
         if (environment.sharedDataFile() != null) {
-            addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
+            addPath(policy, Environment.PATH_SHARED_DATA_SETTING.getKey(), environment.sharedDataFile(), "read,readlink,write,delete");
         }
         for (Path path : environment.dataFiles()) {
-            addPath(policy, "path.data", path, "read,readlink,write,delete");
+            addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
         }
         for (Path path : environment.dataWithClusterFiles()) {
-            addPath(policy, "path.data", path, "read,readlink,write,delete");
+            addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
         }
         for (Path path : environment.repoFiles()) {
-            addPath(policy, "path.repo", path, "read,readlink,write,delete");
+            addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
         }
         if (environment.pidFile() != null) {
             // we just need permission to remove the file if it's elsewhere.

@@ -55,6 +55,17 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequestBuilder;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequestBuilder;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.percolate.MultiPercolateRequest;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
import org.elasticsearch.action.percolate.MultiPercolateResponse;

@@ -85,6 +96,7 @@ import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;

@@ -597,6 +609,66 @@ public interface Client extends ElasticsearchClient, Releasable {

void fieldStats(FieldStatsRequest request, ActionListener<FieldStatsResponse> listener);

/**
* Stores an ingest pipeline
*/
void putPipeline(PutPipelineRequest request, ActionListener<WritePipelineResponse> listener);

/**
* Stores an ingest pipeline
*/
ActionFuture<WritePipelineResponse> putPipeline(PutPipelineRequest request);

/**
* Stores an ingest pipeline
*/
PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source);

/**
* Deletes a stored ingest pipeline
*/
void deletePipeline(DeletePipelineRequest request, ActionListener<WritePipelineResponse> listener);

/**
* Deletes a stored ingest pipeline
*/
ActionFuture<WritePipelineResponse> deletePipeline(DeletePipelineRequest request);

/**
* Deletes a stored ingest pipeline
*/
DeletePipelineRequestBuilder prepareDeletePipeline();

/**
* Returns a stored ingest pipeline
*/
void getPipeline(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener);

/**
* Returns a stored ingest pipeline
*/
ActionFuture<GetPipelineResponse> getPipeline(GetPipelineRequest request);

/**
* Returns a stored ingest pipeline
*/
GetPipelineRequestBuilder prepareGetPipeline(String... ids);

/**
* Simulates an ingest pipeline
*/
void simulatePipeline(SimulatePipelineRequest request, ActionListener<SimulatePipelineResponse> listener);

/**
* Simulates an ingest pipeline
*/
ActionFuture<SimulatePipelineResponse> simulatePipeline(SimulatePipelineRequest request);

/**
* Simulates an ingest pipeline
*/
SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source);

/**
* Returns this client's settings
*/
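A hypothetical usage sketch of the new ingest methods on Client; the pipeline id, the JSON body, and the in-scope client variable are illustrative assumptions, not taken from this commit:

    // Store, fetch, and dry-run a pipeline via the new builder methods.
    BytesReference source = new BytesArray(
            "{\"description\": \"my pipeline\", \"processors\": []}");
    client.preparePutPipeline("my-pipeline", source).get();            // store
    GetPipelineResponse stored = client.prepareGetPipeline("my-pipeline").get();
    SimulatePipelineResponse sim = client.prepareSimulatePipeline(source).get();

Each prepareXxx call returns a request builder, so the blocking get() used here could equally be replaced by the listener-based overloads declared above.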
@@ -272,6 +272,21 @@ import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequestBuilder;
import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequestBuilder;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.MultiPercolateRequest;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;

@@ -320,6 +335,7 @@ import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

@@ -789,6 +805,66 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new FieldStatsRequestBuilder(this, FieldStatsAction.INSTANCE);
}

@Override
public void putPipeline(PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
execute(PutPipelineAction.INSTANCE, request, listener);
}

@Override
public ActionFuture<WritePipelineResponse> putPipeline(PutPipelineRequest request) {
return execute(PutPipelineAction.INSTANCE, request);
}

@Override
public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source) {
return new PutPipelineRequestBuilder(this, PutPipelineAction.INSTANCE, id, source);
}

@Override
public void deletePipeline(DeletePipelineRequest request, ActionListener<WritePipelineResponse> listener) {
execute(DeletePipelineAction.INSTANCE, request, listener);
}

@Override
public ActionFuture<WritePipelineResponse> deletePipeline(DeletePipelineRequest request) {
return execute(DeletePipelineAction.INSTANCE, request);
}

@Override
public DeletePipelineRequestBuilder prepareDeletePipeline() {
return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE);
}

@Override
public void getPipeline(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener) {
execute(GetPipelineAction.INSTANCE, request, listener);
}

@Override
public ActionFuture<GetPipelineResponse> getPipeline(GetPipelineRequest request) {
return execute(GetPipelineAction.INSTANCE, request);
}

@Override
public GetPipelineRequestBuilder prepareGetPipeline(String... ids) {
return new GetPipelineRequestBuilder(this, GetPipelineAction.INSTANCE, ids);
}

@Override
public void simulatePipeline(SimulatePipelineRequest request, ActionListener<SimulatePipelineResponse> listener) {
execute(SimulatePipelineAction.INSTANCE, request, listener);
}

@Override
public ActionFuture<SimulatePipelineResponse> simulatePipeline(SimulatePipelineRequest request) {
return execute(SimulatePipelineAction.INSTANCE, request);
}

@Override
public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source) {
return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source);
}

static class Admin implements AdminClient {

private final ClusterAdmin clusterAdmin;
@@ -19,10 +19,6 @@
package org.elasticsearch.client.transport;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;

@@ -48,6 +44,7 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;

@@ -58,6 +55,10 @@ import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**

@@ -112,10 +113,10 @@ public class TransportClient extends AbstractClient {
.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put("network.server", false)
.put("node.client", true)
.put(Node.NODE_CLIENT_SETTING.getKey(), true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
};
}

/**
* Builds a new instance of the transport client.

@@ -149,7 +150,7 @@ public class TransportClient extends AbstractClient {
// noop
}
});
modules.add(new ActionModule(true));
modules.add(new ActionModule(false, true));
modules.add(new CircuitBreakerModule(settings));

pluginsService.processModules(modules);
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;

@@ -98,6 +99,11 @@ public class TransportClientNodesService extends AbstractComponent {

private volatile boolean closed;

public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);

@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
ThreadPool threadPool, Version version) {

@@ -107,9 +113,9 @@ public class TransportClientNodesService extends AbstractComponent {
this.threadPool = threadPool;
this.minCompatibilityVersion = version.minimumCompatibilityVersion();

this.nodesSamplerInterval = this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
this.ignoreClusterName = this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);
this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);

if (logger.isDebugEnabled()) {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
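A short sketch of what the typed Setting buys over the raw string lookups it replaces; the "10s" override is an assumed example value:

    // Sketch: the Setting owns key, default, and validation in one declaration.
    Settings settings = Settings.settingsBuilder()
            .put("client.transport.ping_timeout", "10s").build();
    TimeValue timeout = TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT.get(settings);
    // -> 10s; with empty Settings the declared 5s default applies, and a
    // positiveTimeSetting is expected to reject negative values up front.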
@@ -57,6 +57,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;

@@ -64,6 +65,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

/**
* Configures classes and services that affect the entire cluster.

@@ -72,7 +74,7 @@ public class ClusterModule extends AbstractModule {

public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard";
public static final String BALANCED_ALLOCATOR = "balanced"; // default
public static final String SHARDS_ALLOCATOR_TYPE_KEY = "cluster.routing.allocation.type";
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER);
public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
Collections.unmodifiableList(Arrays.asList(
SameShardAllocationDecider.class,

@@ -121,7 +123,7 @@ public class ClusterModule extends AbstractModule {
@Override
protected void configure() {
// bind ShardsAllocator
String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_KEY, ClusterModule.BALANCED_ALLOCATOR);
String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.BALANCED_ALLOCATOR);
if (shardsAllocatorType.equals(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR)) {
final ESLogger logger = Loggers.getLogger(getClass(), settings);
logger.warn("{} allocator has been removed in 2.0, using {} instead", ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, ClusterModule.BALANCED_ALLOCATOR);
@@ -120,7 +120,7 @@ public interface ClusterStateTaskExecutor<T> {
}

public boolean isSuccess() {
return failure != null;
return this == SUCCESS;
}

/**
@@ -30,6 +30,7 @@ import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

@@ -61,6 +62,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;

@@ -91,7 +94,7 @@ public class ShardStateAction extends AbstractComponent {
logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().getId(), actionName, masterNode.getId(), shardRoutingEntry);
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode.getId(), shardRoutingEntry);
transportService.sendRequest(masterNode,
actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override

@@ -146,7 +149,7 @@ public class ShardStateAction extends AbstractComponent {

@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while executing action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().getId(), actionName, shardRoutingEntry.getShardRouting());
logger.warn("{} node closed while executing action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}

@@ -211,12 +214,12 @@ public class ShardStateAction extends AbstractComponent {
}
}

private static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry> {
static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry> {
private final AllocationService allocationService;
private final RoutingService routingService;
private final ESLogger logger;

public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
this.allocationService = allocationService;
this.routingService = routingService;
this.logger = logger;

@@ -225,23 +228,56 @@ public class ShardStateAction extends AbstractComponent {
@Override
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size());
for (ShardRoutingEntry task : tasks) {
failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
}

// partition tasks into those that correspond to shards
// that exist versus do not exist
Map<Boolean, List<ShardRoutingEntry>> partition =
tasks.stream().collect(Collectors.partitioningBy(task -> shardExists(currentState, task)));

// tasks that correspond to non-existent shards are marked
// as successful
batchResultBuilder.successes(partition.get(false));

ClusterState maybeUpdatedState = currentState;
List<ShardRoutingEntry> tasksToFail = partition.get(true);
try {
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards);
List<FailedRerouteAllocation.FailedShard> failedShards =
tasksToFail
.stream()
.map(task -> new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure))
.collect(Collectors.toList());
RoutingAllocation.Result result = applyFailedShards(currentState, failedShards);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
batchResultBuilder.successes(tasks);
batchResultBuilder.successes(tasksToFail);
} catch (Throwable t) {
batchResultBuilder.failures(tasks, t);
// failures are communicated back to the requester
// cluster state will not be updated in this case
batchResultBuilder.failures(tasksToFail, t);
}

return batchResultBuilder.build(maybeUpdatedState);
}

// visible for testing
RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards) {
return allocationService.applyFailedShards(currentState, failedShards);
}

private boolean shardExists(ClusterState currentState, ShardRoutingEntry task) {
RoutingNodes.RoutingNodeIterator routingNodeIterator =
currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId());
if (routingNodeIterator != null) {
for (ShardRouting maybe : routingNodeIterator) {
if (task.getShardRouting().isSameAllocation(maybe)) {
return true;
}
}
}
return false;
}

@Override
public void clusterStatePublished(ClusterState newClusterState) {
int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size();
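The partitioning idiom above is plain java.util.stream; a self-contained sketch of the same shape, with a hypothetical task type standing in for ShardRoutingEntry:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class PartitionDemo {
        public static void main(String[] args) {
            List<String> tasks = Arrays.asList("shard-0", "shard-1", "gone-2");
            // true bucket: shards that still exist; false bucket: already gone.
            Map<Boolean, List<String>> partition = tasks.stream()
                    .collect(Collectors.partitioningBy(t -> t.startsWith("shard")));
            System.out.println(partition.get(true));  // [shard-0, shard-1] -> apply failures
            System.out.println(partition.get(false)); // [gone-2] -> mark successful as-is
        }
    }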
@@ -41,7 +41,7 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

/**
* Mapping configuration for a type.

@@ -237,7 +237,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("required")) {
required = nodeBooleanValue(fieldNode);
required = lenientNodeBooleanValue(fieldNode);
}
}
this.routing = new Routing(required);

@@ -254,13 +254,13 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("enabled")) {
enabled = nodeBooleanValue(fieldNode);
enabled = lenientNodeBooleanValue(fieldNode);
} else if (fieldName.equals("format")) {
format = fieldNode.toString();
} else if (fieldName.equals("default") && fieldNode != null) {
defaultTimestamp = fieldNode.toString();
} else if (fieldName.equals("ignore_missing")) {
ignoreMissing = nodeBooleanValue(fieldNode);
ignoreMissing = lenientNodeBooleanValue(fieldNode);
}
}
this.timestamp = new Timestamp(enabled, format, defaultTimestamp, ignoreMissing);
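The exact rules of lenientNodeBooleanValue are not visible in this diff; a hypothetical sketch of the usual lenient-versus-strict distinction in boolean parsing:

    // Hypothetical sketch only: lenient parsing tolerates legacy values such as
    // "on"/"off"/"1"/"0", while a strict parser would accept only true/false.
    static boolean lenientBoolean(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        String s = node.toString();
        return !(s.equals("false") || s.equals("off") || s.equals("no") || s.equals("0"));
    }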
@@ -54,6 +54,7 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.ingest.IngestMetadata;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

@@ -111,6 +112,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
static {
// register non plugin custom metadata
registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO);
registerPrototype(IngestMetadata.TYPE, IngestMetadata.PROTO);
}

/**
@@ -20,11 +20,15 @@ package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.misc.IndexMergeTool;
import org.elasticsearch.Version;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;

@@ -32,6 +36,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.unmodifiableSet;

@@ -48,11 +53,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class MetaDataIndexUpgradeService extends AbstractComponent {

private final MapperRegistry mapperRegistry;
private final IndexScopedSettings indexScopedSettings;

@Inject
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry) {
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) {
super(settings);
this.mapperRegistry = mapperRegistry;
this.indexScopedSettings = indexScopedSettings;
}

/**

@@ -65,21 +72,25 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
return indexMetaData;
}
checkSupportedVersion(indexMetaData);
IndexMetaData newMetaData = indexMetaData;
// we have to run this first otherwise we try to create IndexSettings
// with broken settings and fail in checkMappingsCompatibility
newMetaData = archiveBrokenIndexSettings(newMetaData);
// only run the check with the upgraded settings!!
checkMappingsCompatibility(newMetaData);
newMetaData = markAsUpgraded(newMetaData);
return newMetaData;
return markAsUpgraded(newMetaData);
}

/**
* Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
*/
private boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.V_3_0_0);
boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.CURRENT);
}

/**

@@ -171,4 +182,39 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}
}

private static final String ARCHIVED_SETTINGS_PREFIX = "archived.";

IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
Settings settings = indexMetaData.getSettings();
Settings.Builder builder = Settings.builder();
boolean changed = false;
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
try {
Setting<?> setting = indexScopedSettings.get(entry.getKey());
if (setting != null) {
setting.get(settings);
builder.put(entry.getKey(), entry.getValue());
} else {
if (indexScopedSettings.isPrivateSetting(entry.getKey()) || entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX)) {
builder.put(entry.getKey(), entry.getValue());
} else {
changed = true;
logger.warn("[{}] found unknown index setting: {} value: {} - archiving", indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}
} catch (IllegalArgumentException ex) {
changed = true;
logger.warn("[{}] found invalid index setting: {} value: {} - archiving", ex, indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}

return changed ? IndexMetaData.builder(indexMetaData).settings(builder.build()).build() : indexMetaData;
}

}
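An illustrative before/after for the archiving behaviour, with made-up setting names and values:

    // Illustration (assumed values): an unrecognized key survives the upgrade,
    // but is re-keyed under the "archived." prefix so it is inert yet discoverable.
    Settings before = Settings.builder()
            .put("index.number_of_shards", 5)       // known -> kept as-is
            .put("index.mystery.knob", "whatever")  // unknown -> archived
            .build();
    // after archiveBrokenIndexSettings(...) the map would read:
    //   index.number_of_shards      = 5
    //   archived.index.mystery.knob = whatever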
@@ -32,6 +32,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.transport.TransportAddressSerializers;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;

import java.io.IOException;
import java.util.Collections;

@@ -46,11 +47,11 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
public class DiscoveryNode implements Streamable, ToXContent {

public static boolean localNode(Settings settings) {
if (settings.get("node.local") != null) {
return settings.getAsBoolean("node.local", false);
if (Node.NODE_LOCAL_SETTING.exists(settings)) {
return Node.NODE_LOCAL_SETTING.get(settings);
}
if (settings.get("node.mode") != null) {
String nodeMode = settings.get("node.mode");
if (Node.NODE_MODE_SETTING.exists(settings)) {
String nodeMode = Node.NODE_MODE_SETTING.get(settings);
if ("local".equals(nodeMode)) {
return true;
} else if ("network".equals(nodeMode)) {

@@ -63,28 +64,29 @@ public class DiscoveryNode implements Streamable, ToXContent {
}

public static boolean nodeRequiresLocalStorage(Settings settings) {
return !(settings.getAsBoolean("node.client", false) || (!settings.getAsBoolean("node.data", true) && !settings.getAsBoolean("node.master", true)));
return (Node.NODE_CLIENT_SETTING.get(settings) || (Node.NODE_DATA_SETTING.get(settings) == false && Node.NODE_MASTER_SETTING.get(settings) == false)) == false;
}

public static boolean clientNode(Settings settings) {
String client = settings.get("node.client");
return Booleans.isExplicitTrue(client);
return Node.NODE_CLIENT_SETTING.get(settings);
}

public static boolean masterNode(Settings settings) {
String master = settings.get("node.master");
if (master == null) {
return !clientNode(settings);
if (Node.NODE_MASTER_SETTING.exists(settings)) {
return Node.NODE_MASTER_SETTING.get(settings);
}
return Booleans.isExplicitTrue(master);
return clientNode(settings) == false;
}

public static boolean dataNode(Settings settings) {
String data = settings.get("node.data");
if (data == null) {
return !clientNode(settings);
if (Node.NODE_DATA_SETTING.exists(settings)) {
return Node.NODE_DATA_SETTING.get(settings);
}
return Booleans.isExplicitTrue(data);
return clientNode(settings) == false;
}

public static boolean ingestNode(Settings settings) {
return Node.NODE_INGEST_SETTING.get(settings);
}

public static final List<DiscoveryNode> EMPTY_LIST = Collections.emptyList();

@@ -316,6 +318,14 @@ public class DiscoveryNode implements Streamable, ToXContent {
return masterNode();
}

/**
* Returns a boolean that tells whether this is an ingest node or not
*/
public boolean isIngestNode() {
String ingest = attributes.get("ingest");
return ingest == null ? true : Booleans.parseBooleanExact(ingest);
}

public Version version() {
return this.version;
}
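A small sketch of the resolution order these helpers now follow; the configuration values are assumed for illustration:

    // Sketch: with node.client=true and no explicit node.master/node.data,
    // both role checks fall through to "not a client" and so return false.
    Settings s = Settings.settingsBuilder().put("node.client", true).build();
    boolean isMaster = DiscoveryNode.masterNode(s); // false
    boolean isData = DiscoveryNode.dataNode(s);     // false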
@@ -52,16 +52,20 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
private final ImmutableOpenMap<String, DiscoveryNode> nodes;
private final ImmutableOpenMap<String, DiscoveryNode> dataNodes;
private final ImmutableOpenMap<String, DiscoveryNode> masterNodes;
private final ImmutableOpenMap<String, DiscoveryNode> ingestNodes;

private final String masterNodeId;
private final String localNodeId;
private final Version minNodeVersion;
private final Version minNonClientNodeVersion;

private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes, ImmutableOpenMap<String, DiscoveryNode> masterNodes, String masterNodeId, String localNodeId, Version minNodeVersion, Version minNonClientNodeVersion) {
private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes,
ImmutableOpenMap<String, DiscoveryNode> masterNodes, ImmutableOpenMap<String, DiscoveryNode> ingestNodes,
String masterNodeId, String localNodeId, Version minNodeVersion, Version minNonClientNodeVersion) {
this.nodes = nodes;
this.dataNodes = dataNodes;
this.masterNodes = masterNodes;
this.ingestNodes = ingestNodes;
this.masterNodeId = masterNodeId;
this.localNodeId = localNodeId;
this.minNodeVersion = minNodeVersion;

@@ -164,6 +168,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return masterNodes();
}

/**
* @return All the ingest nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getIngestNodes() {
return ingestNodes;
}

/**
* Get a {@link Map} of the discovered master and data nodes arranged by their ids
*

@@ -654,6 +665,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public DiscoveryNodes build() {
ImmutableOpenMap.Builder<String, DiscoveryNode> dataNodesBuilder = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, DiscoveryNode> masterNodesBuilder = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, DiscoveryNode> ingestNodesBuilder = ImmutableOpenMap.builder();
Version minNodeVersion = Version.CURRENT;
Version minNonClientNodeVersion = Version.CURRENT;
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {

@@ -665,10 +677,16 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.version());
}
if (nodeEntry.value.isIngestNode()) {
ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value);
}
minNodeVersion = Version.smallest(minNodeVersion, nodeEntry.value.version());
}

return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion);
return new DiscoveryNodes(
nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(),
masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion
);
}

public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
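Downstream code can then select ingest-capable nodes without re-scanning the full node map; a hypothetical caller, assuming an in-scope ClusterState:

    // Hypothetical caller: route an ingest request to any ingest-capable node.
    ImmutableOpenMap<String, DiscoveryNode> ingestNodes = clusterState.nodes().getIngestNodes();
    if (ingestNodes.isEmpty()) {
        throw new IllegalStateException("no ingest nodes in the cluster");
    }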
@@ -19,6 +19,7 @@

package org.elasticsearch.common;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import java.lang.reflect.Method;

@@ -40,7 +41,7 @@ import java.util.concurrent.ThreadLocalRandom;
* setting a reproducible seed. When running the Elasticsearch server
* process, non-reproducible sources of randomness are provided (unless
* a setting is provided for a module that exposes a seed setting (e.g.,
* DiscoveryService#SETTING_DISCOVERY_SEED)).
* DiscoveryService#DISCOVERY_SEED_SETTING)).
*/
public final class Randomness {
private static final Method currentMethod;

@@ -68,13 +69,12 @@ public final class Randomness {
* seed in the settings with the key setting.
*
* @param settings the settings containing the seed
* @param setting the key to access the seed
* @param setting the setting to access the seed
* @return a reproducible source of randomness
*/
public static Random get(Settings settings, String setting) {
Long maybeSeed = settings.getAsLong(setting, null);
if (maybeSeed != null) {
return new Random(maybeSeed);
public static Random get(Settings settings, Setting<Long> setting) {
if (setting.exists(settings)) {
return new Random(setting.get(settings));
} else {
return get();
}
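The call-site change this implies, in sketch form; the discovery seed setting is the example named in the javadoc above, and its key name is assumed:

    // before: seed looked up via a bare string key (assumed key name)
    Random r1 = Randomness.get(settings, "discovery.id.seed");
    // after: seed passed as a typed Setting<Long>, which also carries existence checks
    Random r2 = Randomness.get(settings, DiscoveryService.DISCOVERY_SEED_SETTING);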
@@ -37,7 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

@@ -61,7 +61,6 @@ import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;

import static org.elasticsearch.ElasticsearchException.readException;

@@ -678,10 +677,10 @@ public abstract class StreamInput extends InputStream {
}

/**
* Reads a {@link QueryBuilder} from the current stream
* Reads a {@link RescoreBuilder} from the current stream
*/
public Rescorer readRescorer() throws IOException {
return readNamedWriteable(Rescorer.class);
public RescoreBuilder<?> readRescorer() throws IOException {
return readNamedWriteable(RescoreBuilder.class);
}

/**
@@ -36,7 +36,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.joda.time.ReadableInstant;

import java.io.EOFException;

@@ -679,9 +679,9 @@ public abstract class StreamOutput extends OutputStream {
}

/**
* Writes a {@link Rescorer} to the current stream
* Writes a {@link RescoreBuilder} to the current stream
*/
public void writeRescorer(Rescorer rescorer) throws IOException {
public void writeRescorer(RescoreBuilder<?> rescorer) throws IOException {
writeNamedWriteable(rescorer);
}
}
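Read and write sides stay symmetric after this change; a sketch assuming an in-scope StreamOutput/StreamInput pair and a rescoreBuilder instance:

    // Sketch: the rescorer is serialized as a NamedWriteable and read back by
    // its category class, so both ends must agree on RescoreBuilder.
    out.writeRescorer(rescoreBuilder);           // StreamOutput side
    RescoreBuilder<?> copy = in.readRescorer();  // StreamInput side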
@@ -117,12 +117,6 @@ public class Queries {
if (minimumShouldMatch == null) {
return query;
}
// Queries with a single word expanded with synonyms
// have their coordination factor disabled (@see org.apache.lucene.util.QueryBuilder#analyzeBoolean()).
// minimumShouldMatch should not be applicable in such case.
if (query.isCoordDisabled()) {
return query;
}
int optionalClauses = 0;
for (BooleanClause c : query.clauses()) {
if (c.getOccur() == BooleanClause.Occur.SHOULD) {
@@ -27,7 +27,9 @@ import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;

@@ -115,6 +117,10 @@ import org.elasticsearch.rest.action.get.RestGetSourceAction;
import org.elasticsearch.rest.action.get.RestHeadAction;
import org.elasticsearch.rest.action.get.RestMultiGetAction;
import org.elasticsearch.rest.action.index.RestIndexAction;
import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction;
import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
import org.elasticsearch.rest.action.main.RestMainAction;
import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
import org.elasticsearch.rest.action.percolate.RestPercolateAction;

@@ -149,7 +155,7 @@ public class NetworkModule extends AbstractModule {
public static final String NETTY_TRANSPORT = "netty";

public static final String HTTP_TYPE_KEY = "http.type";
public static final String HTTP_ENABLED = "http.enabled";
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);

private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,

@@ -255,7 +261,13 @@ public class NetworkModule extends AbstractModule {
RestCatAction.class,

// Tasks API
RestListTasksAction.class
RestListTasksAction.class,

// Ingest API
RestPutPipelineAction.class,
RestGetPipelineAction.class,
RestDeletePipelineAction.class,
RestSimulatePipelineAction.class
);

private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList(

@@ -366,7 +378,7 @@ public class NetworkModule extends AbstractModule {
bind(TransportProxyClient.class).asEagerSingleton();
bind(TransportClientNodesService.class).asEagerSingleton();
} else {
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
}
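A sketch of the typed setting in use; the override value is assumed for illustration:

    // Sketch: typed lookup replaces settings.getAsBoolean("http.enabled", true).
    Settings s = Settings.settingsBuilder().put("http.enabled", false).build();
    boolean httpEnabled = NetworkModule.HTTP_ENABLED.get(s); // false; true when unset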
@@ -19,7 +19,9 @@

package org.elasticsearch.common.network;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

@@ -41,31 +43,30 @@ public class NetworkService extends AbstractComponent {
/** By default, we bind to loopback interfaces */
public static final String DEFAULT_NETWORK_HOST = "_local_";

private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
private static final String GLOBAL_NETWORK_PUBLISHHOST_SETTING = "network.publish_host";
public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST),
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);

public static final class TcpSettings {
public static final String TCP_NO_DELAY = "network.tcp.no_delay";
public static final String TCP_KEEP_ALIVE = "network.tcp.keep_alive";
public static final String TCP_REUSE_ADDRESS = "network.tcp.reuse_address";
public static final String TCP_SEND_BUFFER_SIZE = "network.tcp.send_buffer_size";
public static final String TCP_RECEIVE_BUFFER_SIZE = "network.tcp.receive_buffer_size";
public static final String TCP_BLOCKING = "network.tcp.blocking";
public static final String TCP_BLOCKING_SERVER = "network.tcp.blocking_server";
public static final String TCP_BLOCKING_CLIENT = "network.tcp.blocking_client";
public static final String TCP_CONNECT_TIMEOUT = "network.tcp.connect_timeout";

public static final ByteSizeValue TCP_DEFAULT_SEND_BUFFER_SIZE = null;
public static final ByteSizeValue TCP_DEFAULT_RECEIVE_BUFFER_SIZE = null;
public static final TimeValue TCP_DEFAULT_CONNECT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
}

/**
* A custom name resolver can support custom lookup keys (my_net_key:ipv4) and also change
* the default inet address used in case no settings are provided.
*/
public static interface CustomNameResolver {
public interface CustomNameResolver {
/**
* Resolves the default value if possible. If not, return <tt>null</tt>.
*/

@@ -94,6 +95,7 @@ public class NetworkService extends AbstractComponent {
/**
* Resolves {@code bindHosts} to a list of internet addresses. The list will
* not contain duplicate addresses.
*
* @param bindHosts list of hosts to bind to. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.

@@ -102,21 +104,22 @@ public class NetworkService extends AbstractComponent {
public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOException {
// first check settings
if (bindHosts == null) {
bindHosts = settings.getAsArray(GLOBAL_NETWORK_BINDHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (bindHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
if (GLOBAL_NETWORK_BINDHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline)
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
}
}
// we know it's not here. get the defaults
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (bindHosts == null) {
bindHosts = new String[] { DEFAULT_NETWORK_HOST };
}

InetAddress addresses[] = resolveInetAddresses(bindHosts);

// try to deal with some (mis)configuration

@@ -138,6 +141,7 @@ public class NetworkService extends AbstractComponent {
* only one address is just a current limitation.
* <p>
* If {@code publishHosts} resolves to more than one address, <b>then one is selected with magic</b>
*
* @param publishHosts list of hosts to publish as. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.

@@ -145,23 +149,23 @@ public class NetworkService extends AbstractComponent {
*/
// TODO: needs to be InetAddress[]
public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOException {
// first check settings
if (publishHosts == null) {
publishHosts = settings.getAsArray(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (publishHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
if (GLOBAL_NETWORK_PUBLISHHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline)
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
}
}
// we know it's not here. get the defaults
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (publishHosts == null) {
publishHosts = new String[] { DEFAULT_NETWORK_HOST };
}

InetAddress addresses[] = resolveInetAddresses(publishHosts);
// TODO: allow publishing multiple addresses
// for now... the hack begins

@@ -184,17 +188,17 @@ public class NetworkService extends AbstractComponent {
throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense");
}
}

// 3. if we end up with multiple publish addresses, select by preference.
// don't warn the user, or they will get confused by bind_host vs publish_host etc.
if (addresses.length > 1) {
List<InetAddress> sorted = new ArrayList<>(Arrays.asList(addresses));
NetworkUtils.sortAddresses(sorted);
addresses = new InetAddress[] { sorted.get(0) };
addresses = new InetAddress[]{sorted.get(0)};
}
return addresses[0];
}

/** resolves (and deduplicates) host specification */
private InetAddress[] resolveInetAddresses(String hosts[]) throws IOException {
if (hosts.length == 0) {
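The effective lookup order after this change, sketched with an assumed configuration:

    // Sketch: network.bind_host declares network.host as its fallback at the
    // Setting level, so an explicit network.host also satisfies the exists() check.
    Settings s = Settings.settingsBuilder().put("network.host", "_local_").build();
    // GLOBAL_NETWORK_HOST_SETTING.exists(s) -> true, custom resolvers are skipped,
    // and GLOBAL_NETWORK_BINDHOST_SETTING.get(s) yields ["_local_"].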
@ -20,6 +20,8 @@ package org.elasticsearch.common.settings;

import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;

@ -35,15 +37,33 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.node.Node;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;

@ -98,6 +118,9 @@ public final class ClusterSettings extends AbstractScopedSettings {

    public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
        TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
        TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
        TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
        AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
        BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
        BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,

@ -110,6 +133,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
        FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
        FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
        FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
        FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
        FsRepository.REPOSITORIES_COMPRESS_SETTING,
        FsRepository.REPOSITORIES_LOCATION_SETTING,
        IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
        IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,

@ -139,6 +165,19 @@ public final class ClusterSettings extends AbstractScopedSettings {
        DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
        DiscoverySettings.COMMIT_TIMEOUT_SETTING,
        DiscoverySettings.NO_MASTER_BLOCK_SETTING,
        GatewayService.EXPECTED_DATA_NODES_SETTING,
        GatewayService.EXPECTED_MASTER_NODES_SETTING,
        GatewayService.EXPECTED_NODES_SETTING,
        GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
        GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
        GatewayService.RECOVER_AFTER_NODES_SETTING,
        GatewayService.RECOVER_AFTER_TIME_SETTING,
        NetworkModule.HTTP_ENABLED,
        NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
        NettyHttpServerTransport.SETTING_CORS_ENABLED,
        NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
        NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
        NettyHttpServerTransport.SETTING_PIPELINING,
        HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
        HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
        HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,

@ -156,7 +195,72 @@ public final class ClusterSettings extends AbstractScopedSettings {
        HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
        Transport.TRANSPORT_PROFILES_SETTING,
        Transport.TRANSPORT_TCP_COMPRESS,
        NetworkService.GLOBAL_NETWORK_HOST_SETTING,
        NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
        NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
        NetworkService.TcpSettings.TCP_NO_DELAY,
        NetworkService.TcpSettings.TCP_KEEP_ALIVE,
        NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
        NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
        NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
        NetworkService.TcpSettings.TCP_BLOCKING,
        NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
        NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
        NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
        IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
        IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
        PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING)));
        PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
        ScriptService.SCRIPT_CACHE_SIZE_SETTING,
        IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
        IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
        IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
        IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
        HunspellService.HUNSPELL_LAZY_LOAD,
        HunspellService.HUNSPELL_IGNORE_CASE,
        HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
        IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
        Environment.PATH_CONF_SETTING,
        Environment.PATH_DATA_SETTING,
        Environment.PATH_HOME_SETTING,
        Environment.PATH_LOGS_SETTING,
        Environment.PATH_PLUGINS_SETTING,
        Environment.PATH_REPO_SETTING,
        Environment.PATH_SCRIPTS_SETTING,
        Environment.PATH_SHARED_DATA_SETTING,
        Environment.PIDFILE_SETTING,
        DiscoveryService.DISCOVERY_SEED_SETTING,
        DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
        DiscoveryModule.DISCOVERY_TYPE_SETTING,
        DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
        FaultDetection.PING_RETRIES_SETTING,
        FaultDetection.PING_TIMEOUT_SETTING,
        FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
        FaultDetection.PING_INTERVAL_SETTING,
        FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
        ZenDiscovery.PING_TIMEOUT_SETTING,
        ZenDiscovery.JOIN_TIMEOUT_SETTING,
        ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
        ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
        ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
        ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
        ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
        ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
        ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
        UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
        UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
        SearchService.DEFAULT_KEEPALIVE_SETTING,
        SearchService.KEEPALIVE_INTERVAL_SETTING,
        Node.WRITE_PORTS_FIELD_SETTING,
        Node.NODE_CLIENT_SETTING,
        Node.NODE_DATA_SETTING,
        Node.NODE_MASTER_SETTING,
        Node.NODE_LOCAL_SETTING,
        Node.NODE_MODE_SETTING,
        Node.NODE_INGEST_SETTING,
        URLRepository.ALLOWED_URLS_SETTING,
        URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
        URLRepository.REPOSITORIES_URL_SETTING,
        URLRepository.SUPPORTED_PROTOCOLS_SETTING,
        ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
        EsExecutors.PROCESSORS_SETTING)));
}
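BUILT_IN_CLUSTER_SETTINGS is the registry the node builds its ClusterSettings service from; a key is only accepted if some constant in this list claims it. A short sketch of what registration buys, using the two-argument constructor shown in SettingsModule below (the two keys come straight from the list above, the second with a deliberate typo):

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    class RegistryDemo {
        static void demo() {
            ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
            Setting<?> known = clusterSettings.get("discovery.zen.join_retry_attempts"); // non-null: registered above
            Setting<?> unknown = clusterSettings.get("discovery.zen.join_retries");      // null: no such setting
        }
    }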
@ -152,4 +152,17 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
    public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
        return new IndexScopedSettings(settings, this, metaData);
    }

    public boolean isPrivateSetting(String key) {
        switch (key) {
            case IndexMetaData.SETTING_CREATION_DATE:
            case IndexMetaData.SETTING_INDEX_UUID:
            case IndexMetaData.SETTING_VERSION_CREATED:
            case IndexMetaData.SETTING_VERSION_UPGRADED:
            case MergePolicyConfig.INDEX_MERGE_ENABLED:
                return true;
            default:
                return false;
        }
    }
}
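isPrivateSetting marks keys the system manages itself (creation date, UUID, version stamps). A hedged sketch of the intended use, filtering internal keys out of a user-visible settings map; the method and variable names here are illustrative, not from the commit:

    import java.util.HashMap;
    import java.util.Map;
    import org.elasticsearch.common.settings.IndexScopedSettings;

    class FilterPrivate {
        // indexScopedSettings and rawSettings are assumed to come from the surrounding service
        static Map<String, String> visible(IndexScopedSettings indexScopedSettings, Map<String, String> rawSettings) {
            Map<String, String> filtered = new HashMap<>();
            for (Map.Entry<String, String> e : rawSettings.entrySet()) {
                if (indexScopedSettings.isPrivateSetting(e.getKey()) == false) {
                    filtered.put(e.getKey(), e.getValue());
                }
            }
            return filtered;
        }
    }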
@ -41,6 +41,7 @@ import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * A setting. Encapsulates typical stuff like default value, parsing, and scope.

@ -70,8 +71,21 @@ public class Setting<T> extends ToXContentToBytes {
        this.scope = scope;
    }

    /**
     * Creates a new Setting instance
     * @param key the settings key for this setting.
     * @param fallBackSetting a setting to fall back to if the current setting is not set.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param dynamic true iff this setting can be dynamically updateable
     * @param scope the scope of this setting
     */
    public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, boolean dynamic, Scope scope) {
        this(key, fallBackSetting::getRaw, parser, dynamic, scope);
    }

    /**
     * Returns the settings key or a prefix if this setting is a group setting
     *
     * @see #isGroupSetting()
     */
    public final String getKey() {
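The new constructor makes one setting inherit another's raw value: if the new key is absent, getRaw of the fallback is consulted, which in turn falls through to that setting's own default. A hypothetical renamed-setting pair (both keys are invented for illustration; the five-argument string constructor is the one used by DiscoveryModule later in this commit):

    import java.util.function.Function;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    class FallbackDemo {
        static final Setting<String> OLD_NAME =
            new Setting<>("example.old_name", "default-value", Function.identity(), false, Setting.Scope.CLUSTER);
        static final Setting<String> NEW_NAME =
            new Setting<>("example.new_name", OLD_NAME, Function.identity(), false, Setting.Scope.CLUSTER);

        static String resolve(Settings settings) {
            // example.new_name if set, else example.old_name if set, else "default-value"
            return NEW_NAME.get(settings);
        }
    }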
@ -106,13 +120,21 @@ public class Setting<T> extends ToXContentToBytes {
    }

    /**
     * Returns the default values string representation for this setting.
     * Returns the default value string representation for this setting.
     * @param settings a settings object for settings that has a default value depending on another setting if available
     */
    public final String getDefault(Settings settings) {
    public final String getDefaultRaw(Settings settings) {
        return defaultValue.apply(settings);
    }

    /**
     * Returns the default value for this setting.
     * @param settings a settings object for settings that has a default value depending on another setting if available
     */
    public final T getDefault(Settings settings) {
        return parser.apply(getDefaultRaw(settings));
    }

    /**
     * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
     */

@ -309,6 +331,10 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
    }

    public static Setting<String> simpleString(String key, boolean dynamic, Scope scope) {
        return new Setting<>(key, "", Function.identity(), dynamic, scope);
    }

    public static int parseInt(String s, int minValue, String key) {
        int value = Integer.parseInt(s);
        if (value < minValue) {

@ -333,6 +359,10 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
    }

    public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, boolean dynamic, Scope scope) {
        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
    }

@ -348,25 +378,15 @@ public class Setting<T> extends ToXContentToBytes {
    public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
        return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
    }

    public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
        return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
    }

    public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
        Function<String, List<T>> parser = (s) -> {
            try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){
                XContentParser.Token token = xContentParser.nextToken();
                if (token != XContentParser.Token.START_ARRAY) {
                    throw new IllegalArgumentException("expected START_ARRAY but got " + token);
                }
                ArrayList<T> list = new ArrayList<>();
                while ((token = xContentParser.nextToken()) !=XContentParser.Token.END_ARRAY) {
                    if (token != XContentParser.Token.VALUE_STRING) {
                        throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
                    }
                    list.add(singleValueParser.apply(xContentParser.text()));
                }
                return list;
            } catch (IOException e) {
                throw new IllegalArgumentException("failed to parse array", e);
            }
        };
        Function<String, List<T>> parser = (s) ->
            parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());

        return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
            private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
            @Override

@ -387,6 +407,26 @@ public class Setting<T> extends ToXContentToBytes {
        };
    }

    private static List<String> parseableStringToList(String parsableString) {
        try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(parsableString)) {
            XContentParser.Token token = xContentParser.nextToken();
            if (token != XContentParser.Token.START_ARRAY) {
                throw new IllegalArgumentException("expected START_ARRAY but got " + token);
            }
            ArrayList<String> list = new ArrayList<>();
            while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
                if (token != XContentParser.Token.VALUE_STRING) {
                    throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
                }
                list.add(xContentParser.text());
            }
            return list;
        } catch (IOException e) {
            throw new IllegalArgumentException("failed to parse array", e);
        }
    }

    private static String arrayToParsableString(String[] array) {
        try {
            XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
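End to end, a list setting now serializes its default through arrayToParsableString into a JSON array string and parses values back through the extracted parseableStringToList helper. A small sketch (the key and default values are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Function;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    class ListSettingDemo {
        static final Setting<List<String>> NAMES =
            Setting.listSetting("example.names", Arrays.asList("a", "b"), Function.identity(), false, Setting.Scope.CLUSTER);

        static List<String> defaults() {
            // the default round-trips through the JSON array form ["a","b"]
            return NAMES.get(Settings.EMPTY);
        }
    }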
@ -24,6 +24,7 @@ import org.elasticsearch.common.inject.AbstractModule;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.function.Predicate;

/**
 * A module that binds the provided settings to the {@link Settings} interface.

@ -54,18 +55,13 @@ public class SettingsModule extends AbstractModule {
        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
        // by now we are fully configured, lets check node level settings for unregistered index settings
        indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
        // we can't call this method yet since we have not all node level settings registered.
        // yet we can validate the ones we have registered to not have invalid values. this is better than nothing
        // and progress over perfection and we fail as soon as possible.
        // clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
        for (Map.Entry<String, String> entry : settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()).getAsMap().entrySet()) {
            if (clusterSettings.get(entry.getKey()) != null) {
                clusterSettings.validate(entry.getKey(), settings);
            } else if (AbstractScopedSettings.isValidKey(entry.getKey()) == false) {
                throw new IllegalArgumentException("illegal settings key: [" + entry.getKey() + "]");
            }
        Predicate<String> noIndexSettingPredicate = IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate();
        Predicate<String> noTribePredicate = (s) -> s.startsWith("tribe.") == false;
        for (Map.Entry<String, String> entry : settings.filter(noTribePredicate.and(noIndexSettingPredicate)).getAsMap().entrySet()) {
            validateClusterSetting(clusterSettings, entry.getKey(), settings);
        }

        validateTribeSettings(settings, clusterSettings);
        bind(Settings.class).toInstance(settings);
        bind(SettingsFilter.class).toInstance(settingsFilter);

@ -90,4 +86,25 @@ public class SettingsModule extends AbstractModule {
        }
    }

    public void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
        Map<String, Settings> groups = settings.getGroups("tribe.", true);
        for (Map.Entry<String, Settings> tribeSettings : groups.entrySet()) {
            for (Map.Entry<String, String> entry : tribeSettings.getValue().getAsMap().entrySet()) {
                validateClusterSetting(clusterSettings, entry.getKey(), tribeSettings.getValue());
            }
        }
    }

    private final void validateClusterSetting(ClusterSettings clusterSettings, String key, Settings settings) {
        // we can't call this method yet since we have not all node level settings registered.
        // yet we can validate the ones we have registered to not have invalid values. this is better than nothing
        // and progress over perfection and we fail as soon as possible.
        // clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
        if (clusterSettings.get(key) != null) {
            clusterSettings.validate(key, settings);
        } else if (AbstractScopedSettings.isValidKey(key) == false) {
            throw new IllegalArgumentException("illegal settings key: [" + key + "]");
        }
    }

}
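The tribe pass applies exactly the same per-key check to each tribe's inner settings group. Restated with illustrative keys, assuming Settings.builder() as used throughout this codebase:

    import org.elasticsearch.common.settings.Settings;

    class TribeValidationDemo {
        static Settings tribeSettings() {
            // getGroups("tribe.", true) on this yields { t1 -> {...}, t2 -> {...} };
            // validateClusterSetting then runs per inner key, so a typo inside a
            // tribe block fails startup just like a top-level typo would.
            return Settings.builder()
                .put("tribe.t1.discovery.zen.ping_timeout", "5s")
                .put("tribe.t2.cluster.routing.allocation.awareness.attributes", "zone")
                .build();
        }
    }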
@ -1,383 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.util;

import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;

import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 */
public class MultiDataPathUpgrader {

    private final NodeEnvironment nodeEnvironment;
    private final ESLogger logger = Loggers.getLogger(getClass());

    /**
     * Creates a new upgrader instance
     * @param nodeEnvironment the node env to operate on.
     *
     */
    public MultiDataPathUpgrader(NodeEnvironment nodeEnvironment) {
        this.nodeEnvironment = nodeEnvironment;
    }

    /**
     * Upgrades the given shard Id from multiple shard paths into the given target path.
     *
     * @see #pickShardPath(org.elasticsearch.index.shard.ShardId)
     */
    public void upgrade(ShardId shard, ShardPath targetPath) throws IOException {
        final Path[] paths = nodeEnvironment.availableShardPaths(shard); // custom data path doesn't need upgrading
        if (isTargetPathConfigured(paths, targetPath) == false) {
            throw new IllegalArgumentException("shard path must be one of the shards data paths");
        }
        assert needsUpgrading(shard) : "Should not upgrade a path that needs no upgrading";
        logger.info("{} upgrading multi data dir to {}", shard, targetPath.getDataPath());
        final ShardStateMetaData loaded = ShardStateMetaData.FORMAT.loadLatestState(logger, paths);
        if (loaded == null) {
            throw new IllegalStateException(shard + " no shard state found in any of: " + Arrays.toString(paths) + " please check and remove them if possible");
        }
        logger.info("{} loaded shard state {}", shard, loaded);

        ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
        Files.createDirectories(targetPath.resolveIndex());
        try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
            try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
            } catch (LockObtainFailedException ex) {
                throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex);
            }
        }

        upgradeFiles(shard, targetPath, targetPath.resolveTranslog(), ShardPath.TRANSLOG_FOLDER_NAME, paths);

        logger.info("{} wipe upgraded directories", shard);
        for (Path path : paths) {
            if (path.equals(targetPath.getShardStatePath()) == false) {
                logger.info("{} wipe shard directories: [{}]", shard, path);
                IOUtils.rm(path);
            }
        }

        if (FileSystemUtils.files(targetPath.resolveIndex()).length == 0) {
            throw new IllegalStateException("index folder [" + targetPath.resolveIndex() + "] is empty");
        }

        if (FileSystemUtils.files(targetPath.resolveTranslog()).length == 0) {
            throw new IllegalStateException("translog folder [" + targetPath.resolveTranslog() + "] is empty");
        }
    }

    /**
     * Runs check-index on the target shard and throws an exception if it failed
     */
    public void checkIndex(ShardPath targetPath) throws IOException {
        BytesStreamOutput os = new BytesStreamOutput();
        PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
        try (Directory directory = new SimpleFSDirectory(targetPath.resolveIndex());
             final CheckIndex checkIndex = new CheckIndex(directory)) {
            checkIndex.setInfoStream(out);
            CheckIndex.Status status = checkIndex.checkIndex();
            out.flush();
            if (!status.clean) {
                logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
                throw new IllegalStateException("index check failure");
            }
        }
    }

    /**
     * Returns true iff the given shard needs upgrading.
     */
    public boolean needsUpgrading(ShardId shard) {
        final Path[] paths = nodeEnvironment.availableShardPaths(shard);
        // custom data path doesn't need upgrading neither single path envs
        if (paths.length > 1) {
            int numPathsExist = 0;
            for (Path path : paths) {
                if (Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
                    numPathsExist++;
                    if (numPathsExist > 1) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    /**
     * Picks a target ShardPath to allocate and upgrade the given shard to. It picks the target based on a simple
     * heuristic:
     * <ul>
     * <li>if the smallest datapath has 2x more space available that the shards total size the datapath with the most bytes for that shard is picked to minimize the amount of bytes to copy</li>
     * <li>otherwise the largest available datapath is used as the target no matter how big of a slice of the shard it already holds.</li>
     * </ul>
     */
    public ShardPath pickShardPath(ShardId shard) throws IOException {
        if (needsUpgrading(shard) == false) {
            throw new IllegalStateException("Shard doesn't need upgrading");
        }
        final NodeEnvironment.NodePath[] paths = nodeEnvironment.nodePaths();

        // if we need upgrading make sure we have all paths.
        for (NodeEnvironment.NodePath path : paths) {
            Files.createDirectories(path.resolve(shard));
        }
        final ShardFileInfo[] shardFileInfo = getShardFileInfo(shard, paths);
        long totalBytesUsedByShard = 0;
        long leastUsableSpace = Long.MAX_VALUE;
        long mostUsableSpace = Long.MIN_VALUE;
        assert shardFileInfo.length == nodeEnvironment.availableShardPaths(shard).length;
        for (ShardFileInfo info : shardFileInfo) {
            totalBytesUsedByShard += info.spaceUsedByShard;
            leastUsableSpace = Math.min(leastUsableSpace, info.usableSpace + info.spaceUsedByShard);
            mostUsableSpace = Math.max(mostUsableSpace, info.usableSpace + info.spaceUsedByShard);
        }

        if (mostUsableSpace < totalBytesUsedByShard) {
            throw new IllegalStateException("Can't upgrade path available space: " + new ByteSizeValue(mostUsableSpace) + " required space: " + new ByteSizeValue(totalBytesUsedByShard));
        }
        ShardFileInfo target = shardFileInfo[0];
        if (leastUsableSpace >= (2 * totalBytesUsedByShard)) {
            for (ShardFileInfo info : shardFileInfo) {
                if (info.spaceUsedByShard > target.spaceUsedByShard) {
                    target = info;
                }
            }
        } else {
            for (ShardFileInfo info : shardFileInfo) {
                if (info.usableSpace > target.usableSpace) {
                    target = info;
                }
            }
        }
        return new ShardPath(false, target.path, target.path, IndexMetaData.INDEX_UUID_NA_VALUE /* we don't know */, shard);
    }

    private ShardFileInfo[] getShardFileInfo(ShardId shard, NodeEnvironment.NodePath[] paths) throws IOException {
        final ShardFileInfo[] info = new ShardFileInfo[paths.length];
        for (int i = 0; i < info.length; i++) {
            Path path = paths[i].resolve(shard);
            final long usabelSpace = getUsabelSpace(paths[i]);
            info[i] = new ShardFileInfo(path, usabelSpace, getSpaceUsedByShard(path));
        }
        return info;
    }

    protected long getSpaceUsedByShard(Path path) throws IOException {
        final long[] spaceUsedByShard = new long[] {0};
        if (Files.exists(path)) {
            Files.walkFileTree(path, new FileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (attrs.isRegularFile()) {
                        spaceUsedByShard[0] += attrs.size();
                    }
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                    return FileVisitResult.CONTINUE;
                }
            });
        }
        return spaceUsedByShard[0];
    }

    protected long getUsabelSpace(NodeEnvironment.NodePath path) throws IOException {
        FileStore fileStore = path.fileStore;
        return fileStore.getUsableSpace();
    }

    static class ShardFileInfo {
        final Path path;
        final long usableSpace;
        final long spaceUsedByShard;

        ShardFileInfo(Path path, long usableSpace, long spaceUsedByShard) {
            this.path = path;
            this.usableSpace = usableSpace;
            this.spaceUsedByShard = spaceUsedByShard;
        }
    }

    private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName, Path[] paths) throws IOException {
        List<Path> movedFiles = new ArrayList<>();
        for (Path path : paths) {
            if (path.equals(targetPath.getDataPath()) == false) {
                final Path sourceDir = path.resolve(folderName);
                if (Files.exists(sourceDir)) {
                    logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
                    try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
                        Files.createDirectories(targetDir);
                        for (Path file : stream) {
                            if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString()) || Files.isDirectory(file)) {
                                continue; // skip write.lock
                            }
                            logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(), Files.size(file));
                            final Path targetFile = targetDir.resolve(file.getFileName());
                            /* We are pessimistic and do a copy first to the other path and then and atomic move to rename it such that
                               in the worst case the file exists twice but is never lost or half written.*/
                            final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_", "_" + file.getFileName().toString());
                            Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
                            Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
                            Files.delete(file);
                            movedFiles.add(targetFile);
                        }
                    }
                }
            }
        }
        if (movedFiles.isEmpty() == false) {
            // fsync later it might be on disk already
            logger.info("{} fsync files", shard);
            for (Path moved : movedFiles) {
                logger.info("{} syncing [{}]", shard, moved.getFileName());
                IOUtils.fsync(moved, false);
            }
            logger.info("{} syncing directory [{}]", shard, targetDir);
            IOUtils.fsync(targetDir, true);
        }
    }

    /**
     * Returns <code>true</code> iff the target path is one of the given paths.
     */
    private boolean isTargetPathConfigured(final Path[] paths, ShardPath targetPath) {
        for (Path path : paths) {
            if (path.equals(targetPath.getDataPath())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Runs an upgrade on all shards located under the given node environment if there is more than 1 data.path configured
     * otherwise this method will return immediately.
     */
    public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
        if (nodeEnv.nodeDataPaths().length > 1) {
            final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
            final Set<String> allIndices = nodeEnv.findAllIndices();

            for (String index : allIndices) {
                for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
                    try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
                        if (upgrader.needsUpgrading(shardId)) {
                            final ShardPath shardPath = upgrader.pickShardPath(shardId);
                            upgrader.upgrade(shardId, shardPath);
                            // we have to check if the index path exists since we might
                            // have only upgraded the shard state that is written under /indexname/shardid/_state
                            // in the case we upgraded a dedicated index directory index
                            if (Files.exists(shardPath.resolveIndex())) {
                                upgrader.checkIndex(shardPath);
                            }
                        } else {
                            logger.debug("{} no upgrade needed - already upgraded");
                        }
                    }
                }
            }
        }
    }

    private static Set<ShardId> findAllShardIds(Path... locations) throws IOException {
        final Set<ShardId> shardIds = new HashSet<>();
        for (final Path location : locations) {
            if (Files.isDirectory(location)) {
                shardIds.addAll(findAllShardsForIndex(location));
            }
        }
        return shardIds;
    }

    private static Set<ShardId> findAllShardsForIndex(Path indexPath) throws IOException {
        Set<ShardId> shardIds = new HashSet<>();
        if (Files.isDirectory(indexPath)) {
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
                String currentIndex = indexPath.getFileName().toString();
                for (Path shardPath : stream) {
                    String fileName = shardPath.getFileName().toString();
                    if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
                        int shardId = Integer.parseInt(fileName);
                        ShardId id = new ShardId(currentIndex, shardId);
                        shardIds.add(id);
                    }
                }
            }
        }
        return shardIds;
    }

}
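For reference, the target-selection heuristic the removed pickShardPath implemented, restated as a standalone sketch: when even the tightest data path could hold the shard twice over, prefer the path that already holds the most shard bytes (least copying); otherwise prefer the path with the most free space.

    class PickTargetPath {
        // usableSpace[i] and usedByShard[i] describe data path i; returns the chosen index
        static int pickTarget(long[] usableSpace, long[] usedByShard) {
            long totalUsedByShard = 0;
            long leastUsable = Long.MAX_VALUE;
            for (int i = 0; i < usableSpace.length; i++) {
                totalUsedByShard += usedByShard[i];
                leastUsable = Math.min(leastUsable, usableSpace[i] + usedByShard[i]);
            }
            boolean plentyOfRoom = leastUsable >= 2 * totalUsedByShard;
            int target = 0;
            for (int i = 1; i < usableSpace.length; i++) {
                boolean better = plentyOfRoom
                    ? usedByShard[i] > usedByShard[target]   // minimize bytes to copy
                    : usableSpace[i] > usableSpace[target];  // maximize headroom
                if (better) {
                    target = i;
                }
            }
            return target;
        }
    }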
@ -19,6 +19,7 @@

package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import java.util.Arrays;

@ -40,10 +41,7 @@ public class EsExecutors {
     * Settings key to manually set the number of available processors.
     * This is used to adjust thread pools sizes etc. per node.
     */
    public static final String PROCESSORS = "processors";

    /** Useful for testing */
    public static final String DEFAULT_SYSPROP = "es.processors.override";
    public static final Setting<Integer> PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ;

    /**
     * Returns the number of processors available but at most <tt>32</tt>.

@ -53,11 +51,7 @@ public class EsExecutors {
     * ie. >= 48 create too many threads and run into OOM see #3478
     * We just use an 32 core upper-bound here to not stress the system
     * too much with too many created threads */
        int defaultValue = Math.min(32, Runtime.getRuntime().availableProcessors());
        try {
            defaultValue = Integer.parseInt(System.getProperty(DEFAULT_SYSPROP));
        } catch (Throwable ignored) {}
        return settings.getAsInt(PROCESSORS, defaultValue);
        return PROCESSORS_SETTING.get(settings);
    }

    public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, ThreadContext contextHolder) {
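The es.processors.override system property and the manual getAsInt lookup collapse into one typed, bounds-checked setting. A sketch of reading it (the override value is illustrative):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.concurrent.EsExecutors;

    class ProcessorsDemo {
        static void demo() {
            Settings overridden = Settings.builder().put("processors", 2).build();
            int two = EsExecutors.PROCESSORS_SETTING.get(overridden);          // 2, validated against the minimum of 1
            int bounded = EsExecutors.PROCESSORS_SETTING.get(Settings.EMPTY);  // min(32, available cores)
        }
    }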
@ -223,7 +223,7 @@ public final class ObjectParser<Value, Context> implements BiFunction<XContentPa
            list.add(supplier.get()); // single value
        } else {
            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                if (parser.currentToken().isValue()) {
                if (parser.currentToken().isValue() || parser.currentToken() == XContentParser.Token.START_OBJECT) {
                    list.add(supplier.get());
                } else {
                    throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]");

@ -237,6 +237,11 @@ public final class ObjectParser<Value, Context> implements BiFunction<XContentPa
        declareField((p, v, c) -> consumer.accept(v, objectParser.apply(p, c)), field, ValueType.OBJECT);
    }

    public <T> void declareObjectArray(BiConsumer<Value, List<T>> consumer, BiFunction<XContentParser, Context, T> objectParser, ParseField field) {
        declareField((p, v, c) -> consumer.accept(v, parseArray(p, () -> objectParser.apply(p, c))), field, ValueType.OBJECT_ARRAY);
    }

    public <T> void declareObjectOrDefault(BiConsumer<Value, T> consumer, BiFunction<XContentParser, Context, T> objectParser, Supplier<T> defaultValue, ParseField field) {
        declareField((p, v, c) -> {
            if (p.currentToken() == XContentParser.Token.VALUE_BOOLEAN) {

@ -333,6 +338,7 @@ public final class ObjectParser<Value, Context> implements BiFunction<XContentPa
        INT_ARRAY(EnumSet.of(XContentParser.Token.START_ARRAY, XContentParser.Token.VALUE_NUMBER, XContentParser.Token.VALUE_STRING)),
        BOOLEAN_ARRAY(EnumSet.of(XContentParser.Token.START_ARRAY, XContentParser.Token.VALUE_BOOLEAN)),
        OBJECT(EnumSet.of(XContentParser.Token.START_OBJECT)),
        OBJECT_ARRAY(EnumSet.of(XContentParser.Token.START_OBJECT, XContentParser.Token.START_ARRAY)),
        OBJECT_OR_BOOLEAN(EnumSet.of(XContentParser.Token.START_OBJECT, XContentParser.Token.VALUE_BOOLEAN)),
        VALUE(EnumSet.of(XContentParser.Token.VALUE_BOOLEAN, XContentParser.Token.VALUE_NULL ,XContentParser.Token.VALUE_EMBEDDED_OBJECT,XContentParser.Token.VALUE_NUMBER,XContentParser.Token.VALUE_STRING));
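A hypothetical use of the new declareObjectArray hook; the Shape and Point types, their methods, and the parser construction are invented for illustration, and only the declareObjectArray call itself comes from the hunk above:

    // constructor shape and helper types are assumptions, not from this commit
    ObjectParser<Shape, Void> parser = new ObjectParser<>("shape", Shape::new);
    parser.declareObjectArray(Shape::setPoints, (p, c) -> Point.parse(p), new ParseField("points"));
    // with the widened token check above, {"points": {...}} and {"points": [{...}, {...}]}
    // both populate the list: a single object now reads as a one-element array.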
@ -347,14 +347,20 @@ public class XContentMapValues {
        return Long.parseLong(node.toString());
    }

    public static boolean nodeBooleanValue(Object node, boolean defaultValue) {
    /**
     * This method is very lenient, use {@link #nodeBooleanValue} instead.
     */
    public static boolean lenientNodeBooleanValue(Object node, boolean defaultValue) {
        if (node == null) {
            return defaultValue;
        }
        return nodeBooleanValue(node);
        return lenientNodeBooleanValue(node);
    }

    public static boolean nodeBooleanValue(Object node) {
    /**
     * This method is very lenient, use {@link #nodeBooleanValue} instead.
     */
    public static boolean lenientNodeBooleanValue(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }

@ -365,6 +371,17 @@ public class XContentMapValues {
        return !(value.equals("false") || value.equals("0") || value.equals("off"));
    }

    public static boolean nodeBooleanValue(Object node) {
        switch (node.toString()) {
            case "true":
                return true;
            case "false":
                return false;
            default:
                throw new IllegalArgumentException("Can't parse boolean value [" + node + "], expected [true] or [false]");
        }
    }

    public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) {
        if (node == null) {
            return defaultValue;
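The split makes the contrast explicit: the lenient variant treats everything except "false", "0", and "off" as true, while the strict variant accepts only the two literal spellings. Concretely:

    XContentMapValues.lenientNodeBooleanValue("on");   // true: not in the false/0/off set
    XContentMapValues.lenientNodeBooleanValue("yes");  // true
    XContentMapValues.nodeBooleanValue("false");       // false
    XContentMapValues.nodeBooleanValue("on");          // throws IllegalArgumentException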
@ -22,6 +22,7 @@ package org.elasticsearch.discovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;

@ -36,14 +37,17 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/**
 * A module for loading classes for node discovery.
 */
public class DiscoveryModule extends AbstractModule {

    public static final String DISCOVERY_TYPE_KEY = "discovery.type";
    public static final String ZEN_MASTER_SERVICE_TYPE_KEY = "discovery.zen.masterservice.type";
    public static final Setting<String> DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type",
        settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER);
    public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type",
        "zen", Function.identity(), false, Setting.Scope.CLUSTER);

    private final Settings settings;
    private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = new ArrayList<>();

@ -93,15 +97,14 @@ public class DiscoveryModule extends AbstractModule {

    @Override
    protected void configure() {
        String defaultType = DiscoveryNode.localNode(settings) ? "local" : "zen";
        String discoveryType = settings.get(DISCOVERY_TYPE_KEY, defaultType);
        String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);
        Class<? extends Discovery> discoveryClass = discoveryTypes.get(discoveryType);
        if (discoveryClass == null) {
            throw new IllegalArgumentException("Unknown Discovery type [" + discoveryType + "]");
        }

        if (discoveryType.equals("local") == false) {
            String masterServiceTypeKey = settings.get(ZEN_MASTER_SERVICE_TYPE_KEY, "zen");
            String masterServiceTypeKey = ZEN_MASTER_SERVICE_TYPE_SETTING.get(settings);
            final Class<? extends ElectMasterService> masterService = masterServiceType.get(masterServiceTypeKey);
            if (masterService == null) {
                throw new IllegalArgumentException("Unknown master service type [" + masterServiceTypeKey + "]");

@ -121,4 +124,4 @@ public class DiscoveryModule extends AbstractModule {
        bind(Discovery.class).to(discoveryClass).asEagerSingleton();
        bind(DiscoveryService.class).asEagerSingleton();
    }
}
}
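discovery.type now carries its conditional default inside the setting itself: the default function consults DiscoveryNode.localNode(settings). A sketch, assuming node.local is the key that check reads (Node.NODE_LOCAL_SETTING in the registry above):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.discovery.DiscoveryModule;

    class DiscoveryTypeDemo {
        static void demo() {
            Settings local = Settings.builder().put("node.local", true).build();
            String type = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(local);           // "local"
            String fallback = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(Settings.EMPTY); // "zen"
        }
    }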
@ -27,6 +27,7 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

@ -39,8 +40,8 @@ import java.util.concurrent.TimeUnit;
 */
public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {

    public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
    public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";
    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
    public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0l, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);

    private static class InitialStateListener implements InitialStateDiscoveryListener {

@ -71,7 +72,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
        super(settings);
        this.discoverySettings = discoverySettings;
        this.discovery = discovery;
        this.initialStateTimeout = settings.getAsTime(SETTING_INITIAL_STATE_TIMEOUT, TimeValue.timeValueSeconds(30));
        this.initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
    }

    public ClusterBlock getNoMasterBlock() {

@ -132,7 +133,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
    }

    public static String generateNodeId(Settings settings) {
        Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED);
        Random random = Randomness.get(settings, DiscoveryService.DISCOVERY_SEED_SETTING);
        return Strings.randomBase64UUID(random);
    }
}
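With the typed seed setting, node-id generation stays deterministic for a fixed seed, which is what discovery.id.seed exists for (reproducible tests, for example). A sketch, assuming Randomness.get seeds a Random from the setting's value when it is present:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.discovery.DiscoveryService;

    class SeededNodeIdDemo {
        static void demo() {
            Settings seeded = Settings.builder().put("discovery.id.seed", 42L).build();
            String first = DiscoveryService.generateNodeId(seeded);
            String second = DiscoveryService.generateNodeId(seeded);
            assert first.equals(second); // same seed, same id (under the stated assumption)
        }
    }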
@ -90,15 +90,17 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {

    public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
    public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout";
    public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout";
    public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts";
    public final static String SETTING_JOIN_RETRY_DELAY = "discovery.zen.join_retry_delay";
    public final static String SETTING_MAX_PINGS_FROM_ANOTHER_MASTER = "discovery.zen.max_pings_from_another_master";
    public final static String SETTING_SEND_LEAVE_REQUEST = "discovery.zen.send_leave_request";
    public final static String SETTING_MASTER_ELECTION_FILTER_CLIENT = "discovery.zen.master_election.filter_client";
    public final static String SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT = "discovery.zen.master_election.wait_for_joins_timeout";
    public final static String SETTING_MASTER_ELECTION_FILTER_DATA = "discovery.zen.master_election.filter_data";
    public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
    public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
        settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
    public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
    public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
    public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
    public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
    public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
    public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
        settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
    public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);

    public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";

@ -164,26 +166,19 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
        this.discoverySettings = discoverySettings;
        this.pingService = pingService;
        this.electMaster = electMasterService;
        this.pingTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(3));
        this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);

        this.joinTimeout = settings.getAsTime(SETTING_JOIN_TIMEOUT, TimeValue.timeValueMillis(this.pingTimeout.millis() * 20));
        this.joinRetryAttempts = settings.getAsInt(SETTING_JOIN_RETRY_ATTEMPTS, 3);
        this.joinRetryDelay = settings.getAsTime(SETTING_JOIN_RETRY_DELAY, TimeValue.timeValueMillis(100));
        this.maxPingsFromAnotherMaster = settings.getAsInt(SETTING_MAX_PINGS_FROM_ANOTHER_MASTER, 3);
        this.sendLeaveRequest = settings.getAsBoolean(SETTING_SEND_LEAVE_REQUEST, true);
        this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
        this.joinRetryAttempts = JOIN_RETRY_ATTEMPTS_SETTING.get(settings);
        this.joinRetryDelay = JOIN_RETRY_DELAY_SETTING.get(settings);
        this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings);
        this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings);

        this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true);
        this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false);
        this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2));
        this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
        this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
        this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
        this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);

        if (this.joinRetryAttempts < 1) {
            throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]");
        }
        if (this.maxPingsFromAnotherMaster < 1) {
            throw new IllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]");
        }

        logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);

        clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
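The derived defaults above are worth spelling out: join_timeout defaults to 20 x ping_timeout, and the wait-for-joins timeout defaults to join_timeout / 2, so a single override cascades through both. With the setting constants from the hunk:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.discovery.zen.ZenDiscovery;

    class DerivedDefaultsDemo {
        static void demo() {
            Settings s = Settings.builder().put("discovery.zen.ping_timeout", "10s").build();
            TimeValue join = ZenDiscovery.JOIN_TIMEOUT_SETTING.get(s);                                   // 200s = 20 x 10s
            TimeValue waitForJoins = ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(s); // 100s = 200s / 2
        }
    }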
@ -21,6 +21,8 @@ package org.elasticsearch.discovery.zen.fd;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

@ -35,11 +37,11 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 */
public abstract class FaultDetection extends AbstractComponent {

    public static final String SETTING_CONNECT_ON_NETWORK_DISCONNECT = "discovery.zen.fd.connect_on_network_disconnect";
    public static final String SETTING_PING_INTERVAL = "discovery.zen.fd.ping_interval";
    public static final String SETTING_PING_TIMEOUT = "discovery.zen.fd.ping_timeout";
    public static final String SETTING_PING_RETRIES = "discovery.zen.fd.ping_retries";
    public static final String SETTING_REGISTER_CONNECTION_LISTENER = "discovery.zen.fd.register_connection_listener";
    public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, Scope.CLUSTER);
    public static final Setting<TimeValue> PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER);
    public static final Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER);
    public static final Setting<Integer> PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER);
    public static final Setting<Boolean> REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER);

    protected final ThreadPool threadPool;
    protected final ClusterName clusterName;

@ -60,11 +62,11 @@ public abstract class FaultDetection extends AbstractComponent {
        this.transportService = transportService;
        this.clusterName = clusterName;

        this.connectOnNetworkDisconnect = settings.getAsBoolean(SETTING_CONNECT_ON_NETWORK_DISCONNECT, false);
        this.pingInterval = settings.getAsTime(SETTING_PING_INTERVAL, timeValueSeconds(1));
        this.pingRetryTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(30));
        this.pingRetryCount = settings.getAsInt(SETTING_PING_RETRIES, 3);
        this.registerConnectionListener = settings.getAsBoolean(SETTING_REGISTER_CONNECTION_LISTENER, true);
        this.connectOnNetworkDisconnect = CONNECT_ON_NETWORK_DISCONNECT_SETTING.get(settings);
        this.pingInterval = PING_INTERVAL_SETTING.get(settings);
        this.pingRetryTimeout = PING_TIMEOUT_SETTING.get(settings);
        this.pingRetryCount = PING_RETRIES_SETTING.get(settings);
        this.registerConnectionListener = REGISTER_CONNECTION_LISTENER_SETTING.get(settings);

        this.connectionListener = new FDConnectionListener();
        if (registerConnectionListener) {
@ -31,6 +31,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;

@ -58,6 +59,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

@ -72,6 +74,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;

@ -83,7 +86,8 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {

    public static final String ACTION_NAME = "internal:discovery/zen/unicast";
    public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";
    public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
    public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER);

    // these limits are per-address
    public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;

@ -135,13 +139,8 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
            }
        }

        this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
        String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
        // trim the hosts
        for (int i = 0; i < hostArr.length; i++) {
            hostArr[i] = hostArr[i].trim();
        }
        List<String> hosts = CollectionUtils.arrayAsArrayList(hostArr);
        this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
        List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
        final int limitPortCounts;
        if (hosts.isEmpty()) {
            // if unicast hosts are not specified, fill with simple defaults on the local machine
|
||||
|
|
|
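For the unicast hosts, the typed list setting also absorbs the manual trimming loop that the old getAsArray code needed. A sketch using only calls visible above, except Settings.Builder#putArray, which is assumed to exist in this codebase:

    import java.util.Collections;
    import java.util.List;
    import java.util.function.Function;

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class ListSettingSketch {
        static final Setting<List<String>> HOSTS_SETTING = Setting.listSetting(
                "discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);

        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .putArray("discovery.zen.ping.unicast.hosts", "host1:9300", "host2:9300") // assumed builder method
                    .build();
            List<String> hosts = HOSTS_SETTING.get(settings); // replaces getAsArray + trim loop
            System.out.println(hosts);
        }
    }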
@@ -23,6 +23,7 @@ import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
@@ -33,6 +34,9 @@ import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

import static org.elasticsearch.common.Strings.cleanPath;

@@ -43,6 +47,15 @@ import static org.elasticsearch.common.Strings.cleanPath;
// TODO: move PathUtils to be package-private here instead of
// public+forbidden api!
public class Environment {
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER);
public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER);

private final Settings settings;

@@ -95,64 +108,64 @@ public class Environment {
public Environment(Settings settings) {
this.settings = settings;
final Path homeFile;
if (settings.get("path.home") != null) {
homeFile = PathUtils.get(cleanPath(settings.get("path.home")));
if (PATH_HOME_SETTING.exists(settings)) {
homeFile = PathUtils.get(cleanPath(PATH_HOME_SETTING.get(settings)));
} else {
throw new IllegalStateException("path.home is not configured");
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
}

if (settings.get("path.conf") != null) {
configFile = PathUtils.get(cleanPath(settings.get("path.conf")));
if (PATH_CONF_SETTING.exists(settings)) {
configFile = PathUtils.get(cleanPath(PATH_CONF_SETTING.get(settings)));
} else {
configFile = homeFile.resolve("config");
}

if (settings.get("path.scripts") != null) {
scriptsFile = PathUtils.get(cleanPath(settings.get("path.scripts")));
if (PATH_SCRIPTS_SETTING.exists(settings)) {
scriptsFile = PathUtils.get(cleanPath(PATH_SCRIPTS_SETTING.get(settings)));
} else {
scriptsFile = configFile.resolve("scripts");
}

if (settings.get("path.plugins") != null) {
pluginsFile = PathUtils.get(cleanPath(settings.get("path.plugins")));
if (PATH_PLUGINS_SETTING.exists(settings)) {
pluginsFile = PathUtils.get(cleanPath(PATH_PLUGINS_SETTING.get(settings)));
} else {
pluginsFile = homeFile.resolve("plugins");
}

String[] dataPaths = settings.getAsArray("path.data");
if (dataPaths.length > 0) {
dataFiles = new Path[dataPaths.length];
dataWithClusterFiles = new Path[dataPaths.length];
for (int i = 0; i < dataPaths.length; i++) {
dataFiles[i] = PathUtils.get(dataPaths[i]);
List<String> dataPaths = PATH_DATA_SETTING.get(settings);
if (dataPaths.isEmpty() == false) {
dataFiles = new Path[dataPaths.size()];
dataWithClusterFiles = new Path[dataPaths.size()];
for (int i = 0; i < dataPaths.size(); i++) {
dataFiles[i] = PathUtils.get(dataPaths.get(i));
dataWithClusterFiles[i] = dataFiles[i].resolve(ClusterName.clusterNameFromSettings(settings).value());
}
} else {
dataFiles = new Path[]{homeFile.resolve("data")};
dataWithClusterFiles = new Path[]{homeFile.resolve("data").resolve(ClusterName.clusterNameFromSettings(settings).value())};
}
if (settings.get("path.shared_data") != null) {
sharedDataFile = PathUtils.get(cleanPath(settings.get("path.shared_data")));
if (PATH_SHARED_DATA_SETTING.exists(settings)) {
sharedDataFile = PathUtils.get(cleanPath(PATH_SHARED_DATA_SETTING.get(settings)));
} else {
sharedDataFile = null;
}
String[] repoPaths = settings.getAsArray("path.repo");
if (repoPaths.length > 0) {
repoFiles = new Path[repoPaths.length];
for (int i = 0; i < repoPaths.length; i++) {
repoFiles[i] = PathUtils.get(repoPaths[i]);
List<String> repoPaths = PATH_REPO_SETTING.get(settings);
if (repoPaths.isEmpty() == false) {
repoFiles = new Path[repoPaths.size()];
for (int i = 0; i < repoPaths.size(); i++) {
repoFiles[i] = PathUtils.get(repoPaths.get(i));
}
} else {
repoFiles = new Path[0];
}
if (settings.get("path.logs") != null) {
logsFile = PathUtils.get(cleanPath(settings.get("path.logs")));
if (PATH_LOGS_SETTING.exists(settings)) {
logsFile = PathUtils.get(cleanPath(PATH_LOGS_SETTING.get(settings)));
} else {
logsFile = homeFile.resolve("logs");
}

if (settings.get("pidfile") != null) {
pidFile = PathUtils.get(cleanPath(settings.get("pidfile")));
if (PIDFILE_SETTING.exists(settings)) {
pidFile = PathUtils.get(cleanPath(PIDFILE_SETTING.get(settings)));
} else {
pidFile = null;
}
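The Environment constructor now asks each Setting whether it is present instead of null-checking raw strings, which also lets error messages carry the real key via getKey(). A sketch of that pattern using the PATH_HOME_SETTING constant defined above; the sample path is invented:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;

    public class PathHomeSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder().put("path.home", "/tmp/es-home").build();
            if (Environment.PATH_HOME_SETTING.exists(settings)) {   // was: settings.get("path.home") != null
                System.out.println(Environment.PATH_HOME_SETTING.get(settings));
            } else {
                throw new IllegalStateException(Environment.PATH_HOME_SETTING.getKey() + " is not configured");
            }
        }
    }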
@@ -34,7 +34,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.MultiDataPathUpgrader;
import org.elasticsearch.env.NodeEnvironment;

import java.nio.file.DirectoryStream;
@@ -77,7 +76,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL

if (DiscoveryNode.dataNode(settings)) {
ensureNoPre019ShardState(nodeEnv);
MultiDataPathUpgrader.upgradeMultiDataPath(nodeEnv, logger);
}

if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) {

@@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoveryService;
@@ -49,6 +50,21 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {

public static final Setting<Integer> EXPECTED_NODES_SETTING = Setting.intSetting(
"gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING = Setting.intSetting(
"gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING = Setting.intSetting(
"gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting(
"gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER);

public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);

public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);

@@ -84,20 +100,26 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
this.discoveryService = discoveryService;
this.threadPool = threadPool;
// allow to control a delay of when indices will get created
this.expectedNodes = this.settings.getAsInt("gateway.expected_nodes", -1);
this.expectedDataNodes = this.settings.getAsInt("gateway.expected_data_nodes", -1);
this.expectedMasterNodes = this.settings.getAsInt("gateway.expected_master_nodes", -1);
this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);

TimeValue defaultRecoverAfterTime = null;
if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
defaultRecoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
} else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
} else {
recoverAfterTime = null;
}

this.recoverAfterTime = this.settings.getAsTime("gateway.recover_after_time", defaultRecoverAfterTime);
this.recoverAfterNodes = this.settings.getAsInt("gateway.recover_after_nodes", -1);
this.recoverAfterDataNodes = this.settings.getAsInt("gateway.recover_after_data_nodes", -1);
this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
// default the recover after master nodes to the minimum master nodes in the discovery
this.recoverAfterMasterNodes = this.settings.getAsInt("gateway.recover_after_master_nodes", settings.getAsInt("discovery.zen.minimum_master_nodes", -1));
if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
} else {
// TODO: change me once the minimum_master_nodes is changed too
recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
}

// Add the not recovered as initial state block, we don't allow anything until
this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
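The rewritten constructor makes the recover_after_time defaulting explicit: a configured value always wins, and the five-minute fallback only applies when an expected_* setting is present. A standalone restatement using the constants declared above; for brevity it only consults expected_nodes, while the real code also checks expected_data_nodes and expected_master_nodes:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.gateway.GatewayService;

    public class RecoverAfterTimeSketch {
        static TimeValue resolve(Settings settings) {
            if (GatewayService.RECOVER_AFTER_TIME_SETTING.exists(settings)) {
                return GatewayService.RECOVER_AFTER_TIME_SETTING.get(settings);              // explicit value wins
            } else if (GatewayService.EXPECTED_NODES_SETTING.get(settings) >= 0) {
                return GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;   // 5m fallback
            }
            return null;                                                                     // no delay at all
        }

        public static void main(String[] args) {
            Settings settings = Settings.builder().put("gateway.expected_nodes", 3).build();
            System.out.println(resolve(settings)); // 5m, because expected_nodes is set
        }
    }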
@@ -21,38 +21,25 @@ package org.elasticsearch.http;

import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestFilter;
import org.elasticsearch.rest.RestFilterChain;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.rest.RestStatus.FORBIDDEN;
import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;
import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
import static org.elasticsearch.rest.RestStatus.OK;

/**
*
* A component to serve http requests, backed by rest handlers.
*/
public class HttpServer extends AbstractLifecycleComponent<HttpServer> implements HttpServerAdapter {

@@ -64,10 +51,6 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> implement

private final NodeService nodeService;

private final boolean disableSites;

private final PluginSiteFilter pluginSiteFilter = new PluginSiteFilter();

@Inject
public HttpServer(Settings settings, Environment environment, HttpServerTransport transport,
RestController restController,
@@ -78,8 +61,6 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> implement
this.restController = restController;
this.nodeService = nodeService;
nodeService.setHttpServer(this);

this.disableSites = this.settings.getAsBoolean("http.disable_sites", false);
transport.httpServerAdapter(this);
}

@@ -113,27 +94,13 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> implement
}

public void dispatchRequest(HttpRequest request, HttpChannel channel, ThreadContext threadContext) {
String rawPath = request.rawPath();
if (rawPath.startsWith("/_plugin/")) {
RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
filterChain.continueProcessing(request, channel);
return;
} else if (rawPath.equals("/favicon.ico")) {
if (request.rawPath().equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
restController.dispatchRequest(request, channel, threadContext);
}


class PluginSiteFilter extends RestFilter {

@Override
public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws IOException {
handlePluginSite((HttpRequest) request, (HttpChannel) channel);
}
}

void handleFavicon(HttpRequest request, HttpChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
@@ -150,129 +117,4 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> implement
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
}
}

void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
if (disableSites) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
if (request.method() == RestRequest.Method.OPTIONS) {
// when we have OPTIONS request, simply send OK by default (with the Access Control Origin header which gets automatically added)
channel.sendResponse(new BytesRestResponse(OK));
return;
}
if (request.method() != RestRequest.Method.GET) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
// TODO for a "/_plugin" endpoint, we should have a page that lists all the plugins?

String path = request.rawPath().substring("/_plugin/".length());
int i1 = path.indexOf('/');
String pluginName;
String sitePath;
if (i1 == -1) {
pluginName = path;
sitePath = null;
// If a trailing / is missing, we redirect to the right page #2654
String redirectUrl = request.rawPath() + "/";
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.MOVED_PERMANENTLY, "text/html", "<head><meta http-equiv=\"refresh\" content=\"0; URL=" + redirectUrl + "\"></head>");
restResponse.addHeader("Location", redirectUrl);
channel.sendResponse(restResponse);
return;
} else {
pluginName = path.substring(0, i1);
sitePath = path.substring(i1 + 1);
}

// we default to index.html, or what the plugin provides (as a unix-style path)
// this is a relative path under _site configured by the plugin.
if (sitePath.length() == 0) {
sitePath = "index.html";
} else {
// remove extraneous leading slashes, its not an absolute path.
while (sitePath.length() > 0 && sitePath.charAt(0) == '/') {
sitePath = sitePath.substring(1);
}
}
final Path siteFile = environment.pluginsFile().resolve(pluginName).resolve("_site");

final String separator = siteFile.getFileSystem().getSeparator();
// Convert file separators.
sitePath = sitePath.replace("/", separator);

Path file = siteFile.resolve(sitePath);

// return not found instead of forbidden to prevent malicious requests to find out if files exist or dont exist
if (!Files.exists(file) || FileSystemUtils.isHidden(file) || !file.toAbsolutePath().normalize().startsWith(siteFile.toAbsolutePath().normalize())) {
channel.sendResponse(new BytesRestResponse(NOT_FOUND));
return;
}

BasicFileAttributes attributes = Files.readAttributes(file, BasicFileAttributes.class);
if (!attributes.isRegularFile()) {
// If it's not a dir, we send a 403
if (!attributes.isDirectory()) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
// We don't serve dir but if index.html exists in dir we should serve it
file = file.resolve("index.html");
if (!Files.exists(file) || FileSystemUtils.isHidden(file) || !Files.isRegularFile(file)) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
}

try {
byte[] data = Files.readAllBytes(file);
channel.sendResponse(new BytesRestResponse(OK, guessMimeType(sitePath), data));
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
}
}


// TODO: Don't respond with a mime type that violates the request's Accept header
private String guessMimeType(String path) {
int lastDot = path.lastIndexOf('.');
if (lastDot == -1) {
return "";
}
String extension = path.substring(lastDot + 1).toLowerCase(Locale.ROOT);
String mimeType = DEFAULT_MIME_TYPES.get(extension);
if (mimeType == null) {
return "";
}
return mimeType;
}

static {
// This is not an exhaustive list, just the most common types. Call registerMimeType() to add more.
Map<String, String> mimeTypes = new HashMap<>();
mimeTypes.put("txt", "text/plain");
mimeTypes.put("css", "text/css");
mimeTypes.put("csv", "text/csv");
mimeTypes.put("htm", "text/html");
mimeTypes.put("html", "text/html");
mimeTypes.put("xml", "text/xml");
mimeTypes.put("js", "text/javascript"); // Technically it should be application/javascript (RFC 4329), but IE8 struggles with that
mimeTypes.put("xhtml", "application/xhtml+xml");
mimeTypes.put("json", "application/json");
mimeTypes.put("pdf", "application/pdf");
mimeTypes.put("zip", "application/zip");
mimeTypes.put("tar", "application/x-tar");
mimeTypes.put("gif", "image/gif");
mimeTypes.put("jpeg", "image/jpeg");
mimeTypes.put("jpg", "image/jpeg");
mimeTypes.put("tiff", "image/tiff");
mimeTypes.put("tif", "image/tiff");
mimeTypes.put("png", "image/png");
mimeTypes.put("svg", "image/svg+xml");
mimeTypes.put("ico", "image/vnd.microsoft.icon");
mimeTypes.put("mp3", "audio/mpeg");
DEFAULT_MIME_TYPES = unmodifiableMap(mimeTypes);
}

public static final Map<String, String> DEFAULT_MIME_TYPES;
}

@@ -113,7 +113,7 @@ public class NettyHttpChannel extends HttpChannel {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
}
if (RestUtils.isBrowser(nettyRequest.headers().get(USER_AGENT))) {
if (transport.settings().getAsBoolean(SETTING_CORS_ENABLED, false)) {
if (SETTING_CORS_ENABLED.get(transport.settings())) {
String originHeader = request.header(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) {
if (corsPattern == null) {
@@ -127,12 +127,12 @@ public class NettyHttpChannel extends HttpChannel {
}
if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
// Allow Ajax requests based on the CORS "preflight" request
resp.headers().add(ACCESS_CONTROL_MAX_AGE, transport.settings().getAsInt(SETTING_CORS_MAX_AGE, 1728000));
resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE"));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length"));
}

if (transport.settings().getAsBoolean(SETTING_CORS_ALLOW_CREDENTIALS, false)) {
if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {
resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}
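The CORS checks above now read typed settings instead of string keys with inline defaults. A sketch using the constants introduced in NettyHttpServerTransport below; the Settings instance here stands in for transport.settings():

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.http.netty.NettyHttpServerTransport;

    public class CorsSettingsSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder().put("http.cors.enabled", true).build();
            // was: transport.settings().getAsBoolean("http.cors.enabled", false)
            boolean corsEnabled = NettyHttpServerTransport.SETTING_CORS_ENABLED.get(settings);
            // was: getAsInt("http.cors.max-age", 1728000); the default now lives in the Setting
            int maxAge = NettyHttpServerTransport.SETTING_CORS_MAX_AGE.get(settings);
            System.out.println(corsEnabled + ", max-age=" + maxAge);
        }
    }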
@@ -19,7 +19,6 @@

package org.elasticsearch.http.netty;

import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -27,7 +26,8 @@ import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -77,9 +77,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
@@ -96,19 +93,19 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
NettyUtils.setup();
}

public static final String SETTING_CORS_ENABLED = "http.cors.enabled";
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin";
public static final String SETTING_CORS_MAX_AGE = "http.cors.max-age";
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods";
public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers";
public static final String SETTING_CORS_ALLOW_CREDENTIALS = "http.cors.allow-credentials";
public static final String SETTING_PIPELINING = "http.pipelining";
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);

public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events";
public static final String SETTING_HTTP_COMPRESSION = "http.compression";
public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level";
public static final String SETTING_HTTP_DETAILED_ERRORS_ENABLED = "http.detailed_errors.enabled";
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);

public static final boolean DEFAULT_SETTING_PIPELINING = true;
public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000;
public static final String DEFAULT_PORT_RANGE = "9200-9300";

@@ -145,8 +142,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

protected int publishPort;

protected final String tcpNoDelay;
protected final String tcpKeepAlive;
protected final boolean tcpNoDelay;
protected final boolean tcpKeepAlive;
protected final boolean reuseAddress;

protected final ByteSizeValue tcpSendBufferSize;
@@ -190,17 +187,17 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.port = settings.get("http.netty.port", settings.get("http.port", DEFAULT_PORT_RANGE));
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.publishPort = settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
this.tcpNoDelay = settings.get("http.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
this.tcpKeepAlive = settings.get("http.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
this.detailedErrorsEnabled = settings.getAsBoolean(SETTING_HTTP_DETAILED_ERRORS_ENABLED, true);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@@ -220,7 +217,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
this.pipelining = settings.getAsBoolean(SETTING_PIPELINING, DEFAULT_SETTING_PIPELINING);
this.pipelining = SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);

// validate max content length
@@ -261,16 +258,13 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory());

if (!"default".equals(tcpNoDelay)) {
serverBootstrap.setOption("child.tcpNoDelay", Booleans.parseBoolean(tcpNoDelay, null));
}
if (!"default".equals(tcpKeepAlive)) {
serverBootstrap.setOption("child.keepAlive", Booleans.parseBoolean(tcpKeepAlive, null));
}
if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
if (tcpSendBufferSize.bytes() > 0) {

serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
}
if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
if (tcpReceiveBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
}
serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
@@ -312,7 +306,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
throw new BindHttpException("Publish address [" + publishInetAddress + "] does not match any of the bound addresses [" + boundAddresses + "]");
}

final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);;
final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);
;
this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
}
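tcpNoDelay and tcpKeepAlive change from String fields (with a magic "default" sentinel and Booleans.parseBoolean at bind time) to plain booleans resolved once in the constructor. A sketch of the two-level fallback; the generic key shown is assumed to be the one TCP_NO_DELAY is registered under:

    import org.elasticsearch.common.settings.Settings;

    import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;

    public class TcpNoDelaySketch {
        public static void main(String[] args) {
            // key assumed: the registration key of TCP_NO_DELAY
            Settings settings = Settings.builder().put("network.tcp.no_delay", false).build();
            // http.netty-specific value overrides the generic TCP setting, whose default lives in the Setting
            boolean tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
            System.out.println(tcpNoDelay); // false, inherited from the generic TCP setting
        }
    }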
@@ -1065,7 +1065,7 @@ public abstract class Engine implements Closeable {
}
}

public static class CommitId implements Writeable<CommitId> {
public static class CommitId implements Writeable {

private final byte[] id;

@@ -28,7 +28,6 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData;
import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData;
import org.elasticsearch.index.fielddata.plain.DisabledIndexFieldData;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import org.elasticsearch.index.fielddata.plain.GeoPointArrayIndexFieldData;
import org.elasticsearch.index.fielddata.plain.IndexIndexFieldData;
@@ -79,6 +78,14 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
private static final String DOC_VALUES_FORMAT = "doc_values";
private static final String PAGED_BYTES_FORMAT = "paged_bytes";

private static final IndexFieldData.Builder DISABLED_BUILDER = new IndexFieldData.Builder() {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
throw new IllegalStateException("Field data loading is forbidden on [" + fieldType.name() + "]");
}
};

private final static Map<String, IndexFieldData.Builder> buildersByType;
private final static Map<String, IndexFieldData.Builder> docValuesBuildersByType;
private final static Map<Tuple<String, String>, IndexFieldData.Builder> buildersByTypeAndFormat;
@@ -96,7 +103,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
buildersByTypeBuilder.put("geo_point", new GeoPointArrayIndexFieldData.Builder());
buildersByTypeBuilder.put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder());
buildersByTypeBuilder.put(IndexFieldMapper.NAME, new IndexIndexFieldData.Builder());
buildersByTypeBuilder.put("binary", new DisabledIndexFieldData.Builder());
buildersByTypeBuilder.put("binary", DISABLED_BUILDER);
buildersByTypeBuilder.put(BooleanFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER);
buildersByType = unmodifiableMap(buildersByTypeBuilder);

@@ -117,35 +124,35 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
buildersByTypeAndFormat = MapBuilder.<Tuple<String, String>, IndexFieldData.Builder>newMapBuilder()
.put(Tuple.tuple("string", PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder())
.put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple("string", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put(Tuple.tuple("float", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("double", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
.put(Tuple.tuple("double", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("double", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("byte", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
.put(Tuple.tuple("byte", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("byte", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("short", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
.put(Tuple.tuple("short", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("short", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("int", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
.put(Tuple.tuple("int", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("int", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("long", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
.put(Tuple.tuple("long", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("long", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointArrayIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new AbstractGeoPointDVIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("binary", DOC_VALUES_FORMAT), new BytesBinaryDVIndexFieldData.Builder())
.put(Tuple.tuple("binary", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("binary", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN))
.put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)

.immutableMap();
}
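Instead of allocating a fresh DisabledIndexFieldData.Builder per map entry, the service now shares one stateless builder that throws on use (which is what lets the whole DisabledIndexFieldData class below be deleted). A generic, self-contained restatement of that refactor in plain Java, not the real IndexFieldData types:

    import java.util.HashMap;
    import java.util.Map;

    import static java.util.Collections.unmodifiableMap;

    public class SharedDisabledBuilderSketch {
        interface Builder {
            Object build(String fieldName);
        }

        // one shared instance replaces many identical throwing builders
        static final Builder DISABLED = fieldName -> {
            throw new IllegalStateException("Field data loading is forbidden on [" + fieldName + "]");
        };

        public static void main(String[] args) {
            Map<String, Builder> builders = new HashMap<>();
            builders.put("string", DISABLED);
            builders.put("long", DISABLED);
            Map<String, Builder> frozen = unmodifiableMap(builders);
            try {
                frozen.get("string").build("title");
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage()); // Field data loading is forbidden on [title]
            }
        }
    }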
@@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.fielddata.plain;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.MultiValueMode;

/**
* A field data implementation that forbids loading and will throw an {@link IllegalStateException} if you try to load
* {@link AtomicFieldData} instances.
*/
public final class DisabledIndexFieldData extends AbstractIndexFieldData<AtomicFieldData> {

public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<AtomicFieldData> build(IndexSettings indexSettings, MappedFieldType fieldType,
IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore Circuit Breaker
return new DisabledIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache);
}
}

public DisabledIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, IndexFieldDataCache cache) {
super(indexSettings, fieldName, fieldDataType, cache);
}

@Override
public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception {
throw fail();
}

@Override
protected AtomicFieldData empty(int maxDoc) {
throw fail();
}

@Override
public IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) {
throw fail();
}

private IllegalStateException fail() {
return new IllegalStateException("Field data loading is forbidden on " + getFieldName());
}

}

@@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.lucene.Lucene;
@@ -223,6 +224,15 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
return context.path().pathAsText(name);
}

protected boolean defaultDocValues(Version indexCreated) {
if (indexCreated.onOrAfter(Version.V_3_0_0)) {
// add doc values by default to keyword (boolean, numerics, etc.) fields
return fieldType.tokenized() == false;
} else {
return fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE;
}
}

protected void setupFieldType(BuilderContext context) {
fieldType.setName(buildFullName(context));
if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
@@ -233,17 +243,10 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
Settings settings = Settings.builder().put(fieldType.fieldDataType().getSettings()).put(fieldDataSettings).build();
fieldType.setFieldDataType(new FieldDataType(fieldType.fieldDataType().getType(), settings));
}
boolean defaultDocValues = fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE;
// backcompat for "fielddata: format: docvalues" for now...
boolean fieldDataDocValues = fieldType.fieldDataType() != null
&& FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldType.fieldDataType().getFormat(context.indexSettings()));
if (fieldDataDocValues && docValuesSet && fieldType.hasDocValues() == false) {
// this forces the doc_values setting to be written, so fielddata does not mask the original setting
defaultDocValues = true;
}
boolean defaultDocValues = defaultDocValues(context.indexCreatedVersion());
defaultFieldType.setHasDocValues(defaultDocValues);
if (docValuesSet == false) {
fieldType.setHasDocValues(defaultDocValues || fieldDataDocValues);
fieldType.setHasDocValues(defaultDocValues);
}
}
}
@@ -395,7 +398,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
boolean defaultIndexed = defaultFieldType.indexOptions() != IndexOptions.NONE;
if (includeDefaults || indexed != defaultIndexed ||
fieldType().tokenized() != defaultFieldType.tokenized()) {
builder.field("index", indexTokenizeOptionToString(indexed, fieldType().tokenized()));
builder.field("index", indexTokenizeOption(indexed, fieldType().tokenized()));
}
if (includeDefaults || fieldType().stored() != defaultFieldType.stored()) {
builder.field("store", fieldType().stored());
@@ -492,14 +495,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
}

protected static String indexTokenizeOptionToString(boolean indexed, boolean tokenized) {
if (!indexed) {
return "no";
} else if (tokenized) {
return "analyzed";
} else {
return "not_analyzed";
}
/* Only protected so that string can override it */
protected Object indexTokenizeOption(boolean indexed, boolean tokenized) {
return indexed;
}

protected boolean hasCustomFieldDataSettings() {
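defaultDocValues() gates the new behavior on the version the index was created with: from V_3_0_0 on, any untokenized field gets doc values by default, whether or not it is indexed. A standalone restatement of that check; the real method reads tokenized/indexOptions off the mapper's fieldType:

    import org.elasticsearch.Version;

    public class DefaultDocValuesSketch {
        static boolean defaultDocValues(Version indexCreated, boolean tokenized, boolean indexed) {
            if (indexCreated.onOrAfter(Version.V_3_0_0)) {
                return tokenized == false;            // new rule: indexing no longer required
            } else {
                return tokenized == false && indexed; // old rule
            }
        }

        public static void main(String[] args) {
            // a not-indexed, untokenized field gets doc values only under the new rule
            System.out.println(defaultDocValues(Version.V_3_0_0, false, false)); // true
        }
    }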
@@ -199,10 +199,8 @@ public abstract class MappedFieldType extends FieldType {
if (stored() != other.stored()) {
conflicts.add("mapper [" + name() + "] has different [store] values");
}
if (hasDocValues() == false && other.hasDocValues()) {
// don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitly set
// when the doc_values field data format is configured
conflicts.add("mapper [" + name() + "] has different [doc_values] values, cannot change from disabled to enabled");
if (hasDocValues() != other.hasDocValues()) {
conflicts.add("mapper [" + name() + "] has different [doc_values] values");
}
if (omitNorms() && !other.omitNorms()) {
conflicts.add("mapper [" + name() + "] has different [omit_norms] values, cannot change from disable to enabled");

@@ -40,7 +40,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.booleanField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
@@ -106,7 +106,7 @@ public class BooleanFieldMapper extends FieldMapper {
if (propNode == null) {
throw new MapperParsingException("Property [null_value] cannot be null.");
}
builder.nullValue(nodeBooleanValue(propNode));
builder.nullValue(lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
iterator.remove();
@@ -225,7 +225,9 @@ public class BooleanFieldMapper extends FieldMapper {
if (value == null) {
return;
}
fields.add(new Field(fieldType().name(), value ? "T" : "F", fieldType()));
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
fields.add(new Field(fieldType().name(), value ? "T" : "F", fieldType()));
}
if (fieldType().hasDocValues()) {
fields.add(new SortedNumericDocValuesField(fieldType().name(), value ? 1 : 0));
}
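The switch from nodeBooleanValue to lenientNodeBooleanValue keeps null_value parsing permissive: the lenient variant coerces legacy truthy/falsy values (e.g. "off", "no", 0) instead of insisting on real booleans. A sketch, assuming the coercion rules of XContentMapValues in this codebase:

    import org.elasticsearch.common.xcontent.support.XContentMapValues;

    public class BooleanLenienceSketch {
        public static void main(String[] args) {
            // lenient: legacy string values are coerced rather than rejected
            System.out.println(XContentMapValues.lenientNodeBooleanValue("off")); // false (assumed coercion)
            // strict: only real booleans / "true" / "false" are acceptable inputs
            System.out.println(XContentMapValues.nodeBooleanValue(true));         // true
        }
    }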
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
@@ -146,6 +147,27 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
StringFieldMapper.Builder builder = stringField(name);
// hack for the fact that string can't just accept true/false for
// the index property and still accepts no/not_analyzed/analyzed
final Object index = node.remove("index");
if (index != null) {
final String normalizedIndex = Strings.toUnderscoreCase(index.toString());
switch (normalizedIndex) {
case "analyzed":
builder.tokenized(true);
node.put("index", true);
break;
case "not_analyzed":
builder.tokenized(false);
node.put("index", true);
break;
case "no":
node.put("index", false);
break;
default:
throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
}
}
parseTextField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@@ -368,6 +390,17 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
}

@Override
protected String indexTokenizeOption(boolean indexed, boolean tokenized) {
if (!indexed) {
return "no";
} else if (tokenized) {
return "analyzed";
} else {
return "not_analyzed";
}
}

@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
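The string parser keeps accepting the legacy tri-state index values by translating them into the new boolean index flag plus a tokenized flag before the generic parsing runs. The mapping, restated as a small self-contained helper mirroring the switch above:

    public class StringIndexValueSketch {
        static String describe(String normalizedIndex) {
            switch (normalizedIndex) {
                case "analyzed":     return "index=true, tokenized=true";
                case "not_analyzed": return "index=true, tokenized=false";
                case "no":           return "index=false";
                default:
                    throw new IllegalArgumentException("Can't parse [index] value [" + normalizedIndex
                            + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
            }
        }

        public static void main(String[] args) {
            System.out.println(describe("not_analyzed")); // index=true, tokenized=false
        }
    }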
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.joda.Joda;
|
|||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.loader.SettingsLoader;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.index.analysis.NamedAnalyzer;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperParser;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
|
@ -45,7 +46,7 @@ import java.util.Map;
|
|||
import java.util.Map.Entry;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.isArray;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;
|
||||
|
@ -62,6 +63,14 @@ public class TypeParsers {
|
|||
public static final String INDEX_OPTIONS_POSITIONS = "positions";
|
||||
public static final String INDEX_OPTIONS_OFFSETS = "offsets";
|
||||
|
||||
private static boolean nodeBooleanValue(Object node, Mapper.TypeParser.ParserContext parserContext) {
|
||||
if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
|
||||
return XContentMapValues.nodeBooleanValue(node);
|
||||
} else {
|
||||
return XContentMapValues.lenientNodeBooleanValue(node);
|
||||
}
|
||||
}
|
||||
|
||||
public static void parseNumberField(NumberFieldMapper.Builder builder, String name, Map<String, Object> numberNode, Mapper.TypeParser.ParserContext parserContext) {
|
||||
parseField(builder, name, numberNode, parserContext);
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = numberNode.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
@ -72,13 +81,13 @@ public class TypeParsers {
|
|||
                 builder.precisionStep(nodeIntegerValue(propNode));
                 iterator.remove();
             } else if (propName.equals("ignore_malformed")) {
-                builder.ignoreMalformed(nodeBooleanValue(propNode));
+                builder.ignoreMalformed(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("coerce")) {
-                builder.coerce(nodeBooleanValue(propNode));
+                builder.coerce(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("omit_norms")) {
-                builder.omitNorms(nodeBooleanValue(propNode));
+                builder.omitNorms(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("similarity")) {
                 SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString());

@@ -102,16 +111,16 @@ public class TypeParsers {
                 parseTermVector(name, propNode.toString(), builder);
                 iterator.remove();
             } else if (propName.equals("store_term_vectors")) {
-                builder.storeTermVectors(nodeBooleanValue(propNode));
+                builder.storeTermVectors(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("store_term_vector_offsets")) {
-                builder.storeTermVectorOffsets(nodeBooleanValue(propNode));
+                builder.storeTermVectorOffsets(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("store_term_vector_positions")) {
-                builder.storeTermVectorPositions(nodeBooleanValue(propNode));
+                builder.storeTermVectorPositions(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("store_term_vector_payloads")) {
-                builder.storeTermVectorPayloads(nodeBooleanValue(propNode));
+                builder.storeTermVectorPayloads(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("analyzer")) {
                 NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());

@@ -160,19 +169,19 @@ public class TypeParsers {
             final String propName = Strings.toUnderscoreCase(entry.getKey());
             final Object propNode = entry.getValue();
             if (propName.equals("store")) {
-                builder.store(parseStore(name, propNode.toString()));
+                builder.store(parseStore(name, propNode.toString(), parserContext));
                 iterator.remove();
             } else if (propName.equals("index")) {
-                parseIndex(name, propNode.toString(), builder);
+                builder.index(parseIndex(name, propNode.toString(), parserContext));
                 iterator.remove();
             } else if (propName.equals(DOC_VALUES)) {
-                builder.docValues(nodeBooleanValue(propNode));
+                builder.docValues(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("boost")) {
                 builder.boost(nodeFloatValue(propNode));
                 iterator.remove();
             } else if (propName.equals("omit_norms")) {
-                builder.omitNorms(nodeBooleanValue(propNode));
+                builder.omitNorms(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("norms")) {
                 final Map<String, Object> properties = nodeMapValue(propNode, "norms");

@@ -181,7 +190,7 @@ public class TypeParsers {
                     final String propName2 = Strings.toUnderscoreCase(entry2.getKey());
                     final Object propNode2 = entry2.getValue();
                     if (propName2.equals("enabled")) {
-                        builder.omitNorms(!nodeBooleanValue(propNode2));
+                        builder.omitNorms(!lenientNodeBooleanValue(propNode2));
                         propsIterator.remove();
                     } else if (propName2.equals(Loading.KEY)) {
                         builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null));

@@ -194,7 +203,7 @@ public class TypeParsers {
                 builder.indexOptions(nodeIndexOptionValue(propNode));
                 iterator.remove();
             } else if (propName.equals("include_in_all")) {
-                builder.includeInAll(nodeBooleanValue(propNode));
+                builder.includeInAll(nodeBooleanValue(propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("similarity")) {
                 SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString());
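The recurring edit in the hunks above swaps the one-argument nodeBooleanValue(propNode) for either a parserContext-aware overload or lenientNodeBooleanValue. A minimal standalone sketch of the assumed semantics follows; the method bodies are illustrative, not the actual XContentMapValues implementation. Lenient parsing treats "false", "off", "no", and "0" as false and anything else as true, while strict parsing accepts only true/false.

public class BooleanLeniencySketch {
    // Assumed lenient semantics, preserved for indices created before 3.0.
    static boolean lenientBooleanValue(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        String value = node.toString();
        return !(value.equals("false") || value.equals("off")
                || value.equals("no") || value.equals("0"));
    }

    // Assumed strict semantics, enforced for indices created on or after 3.0.
    static boolean strictBooleanValue(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        switch (node.toString()) {
            case "true":  return true;
            case "false": return false;
            default:
                throw new IllegalArgumentException(
                        "Can't parse boolean value [" + node + "], expected [true] or [false]");
        }
    }

    public static void main(String[] args) {
        System.out.println(lenientBooleanValue("off"));   // false
        System.out.println(lenientBooleanValue("maybe")); // true -- the leniency being phased out
        System.out.println(strictBooleanValue("true"));   // true
        // strictBooleanValue("maybe") would throw IllegalArgumentException
    }
}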
@@ -319,28 +328,43 @@ public class TypeParsers {
         }
     }

-    public static void parseIndex(String fieldName, String index, FieldMapper.Builder builder) throws MapperParsingException {
-        index = Strings.toUnderscoreCase(index);
-        if ("no".equals(index)) {
-            builder.index(false);
-        } else if ("not_analyzed".equals(index)) {
-            builder.index(true);
-            builder.tokenized(false);
-        } else if ("analyzed".equals(index)) {
-            builder.index(true);
-            builder.tokenized(true);
+    public static boolean parseIndex(String fieldName, String index, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
+        if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
+            switch (index) {
+            case "true":
+                return true;
+            case "false":
+                return false;
+            default:
+                throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true] or [false]");
+            }
         } else {
-            throw new MapperParsingException("wrong value for index [" + index + "] for field [" + fieldName + "]");
+            final String normalizedIndex = Strings.toUnderscoreCase(index);
+            switch (normalizedIndex) {
+            case "true":
+            case "not_analyzed":
+            case "analyzed":
+                return true;
+            case "false":
+            case "no":
+                return false;
+            default:
+                throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
+            }
         }
     }

-    public static boolean parseStore(String fieldName, String store) throws MapperParsingException {
-        if ("no".equals(store)) {
-            return false;
-        } else if ("yes".equals(store)) {
-            return true;
+    public static boolean parseStore(String fieldName, String store, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
+        if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
+            return XContentMapValues.nodeBooleanValue(store);
         } else {
-            return nodeBooleanValue(store);
+            if ("no".equals(store)) {
+                return false;
+            } else if ("yes".equals(store)) {
+                return true;
+            } else {
+                return lenientNodeBooleanValue(store);
+            }
         }
     }
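The rewritten parseIndex above gates its behavior on the version the index was created with. A standalone sketch of that pattern, with versions modeled as plain ints for brevity (the real code compares org.elasticsearch.Version objects; the constant name V_3_0_0 is taken from the diff):

public class ParseIndexSketch {
    static final int V_3_0_0 = 3_000_000;

    static boolean parseIndex(String index, int indexVersionCreated) {
        if (indexVersionCreated >= V_3_0_0) {
            switch (index) {
                case "true":  return true;
                case "false": return false;
                default:
                    throw new IllegalArgumentException(
                            "Can't parse [index] value [" + index + "], expected [true] or [false]");
            }
        }
        // Legacy string values stay accepted on indices created before 3.0.
        switch (index) {
            case "true": case "not_analyzed": case "analyzed": return true;
            case "false": case "no":                           return false;
            default:
                throw new IllegalArgumentException("Can't parse [index] value [" + index + "]");
        }
    }

    public static void main(String[] args) {
        System.out.println(parseIndex("not_analyzed", 2_040_099)); // true on a 2.x index
        // parseIndex("not_analyzed", V_3_0_0) would throw: legacy value rejected on 3.0+
    }
}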
@@ -199,17 +199,17 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
             String propName = Strings.toUnderscoreCase(entry.getKey());
             Object propNode = entry.getValue();
             if (propName.equals("lat_lon")) {
-                builder.enableLatLon(XContentMapValues.nodeBooleanValue(propNode));
+                builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
                 iterator.remove();
             } else if (propName.equals("precision_step")) {
                 builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
                 iterator.remove();
             } else if (propName.equals("geohash")) {
-                builder.enableGeoHash(XContentMapValues.nodeBooleanValue(propNode));
+                builder.enableGeoHash(XContentMapValues.lenientNodeBooleanValue(propNode));
                 iterator.remove();
             } else if (propName.equals("geohash_prefix")) {
-                builder.geoHashPrefix(XContentMapValues.nodeBooleanValue(propNode));
-                if (XContentMapValues.nodeBooleanValue(propNode)) {
+                builder.geoHashPrefix(XContentMapValues.lenientNodeBooleanValue(propNode));
+                if (XContentMapValues.lenientNodeBooleanValue(propNode)) {
                     builder.enableGeoHash(true);
                 }
                 iterator.remove();

@@ -221,7 +221,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
                 }
                 iterator.remove();
             } else if (propName.equals(Names.IGNORE_MALFORMED)) {
-                builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode));
+                builder.ignoreMalformed(XContentMapValues.lenientNodeBooleanValue(propNode));
                 iterator.remove();
             } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
                 iterator.remove();
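One coupling visible in the geo_point hunk above, pre-existing and unchanged by the rename: enabling geohash_prefix also enables geohash. A minimal sketch with a plain builder standing in for BaseGeoPointFieldMapper.Builder (an assumption read off the diff, not the full mapper):

public class GeoHashPrefixSketch {
    boolean enableGeoHash;
    boolean geoHashPrefix;

    void setGeoHashPrefix(boolean prefix) {
        geoHashPrefix = prefix;
        if (prefix) {
            // Prefix terms are derived from the geohash, so indexing them
            // only makes sense with the geohash field enabled as well.
            enableGeoHash = true;
        }
    }
}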
@@ -132,7 +132,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
             String propName = Strings.toUnderscoreCase(entry.getKey());
             Object propNode = entry.getValue();
             if (propName.equals(Names.COERCE)) {
-                builder.coerce = XContentMapValues.nodeBooleanValue(propNode);
+                builder.coerce = XContentMapValues.lenientNodeBooleanValue(propNode);
                 iterator.remove();
             }
         }
@@ -52,7 +52,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField;

@@ -184,11 +184,11 @@ public class GeoShapeFieldMapper extends FieldMapper {
                 builder.fieldType().setStrategyName(fieldNode.toString());
                 iterator.remove();
             } else if (Names.COERCE.equals(fieldName)) {
-                builder.coerce(nodeBooleanValue(fieldNode));
+                builder.coerce(lenientNodeBooleanValue(fieldNode));
                 iterator.remove();
             } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)
                     && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) {
-                builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode));
+                builder.fieldType().setPointsOnly(XContentMapValues.lenientNodeBooleanValue(fieldNode));
                 iterator.remove();
             }
         }
@@ -46,7 +46,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField;

@@ -133,7 +133,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
             // the AllFieldMapper ctor in the builder since it is not valid. Here we validate
             // the doc values settings (old and new) are rejected
             Object docValues = node.get("doc_values");
-            if (docValues != null && nodeBooleanValue(docValues)) {
+            if (docValues != null && lenientNodeBooleanValue(docValues)) {
                 throw new MapperParsingException("Field [" + name + "] is always tokenized and cannot have doc values");
             }
             // convoluted way of specifying doc values

@@ -152,7 +152,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                builder.enabled(nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
+                builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
                 iterator.remove();
             }
         }
@@ -40,7 +40,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

 /**
  * A mapper that indexes the field names of a document under <code>_field_names</code>. This mapper is typically useful in order

@@ -112,7 +112,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                builder.enabled(nodeBooleanValue(fieldNode));
+                builder.enabled(lenientNodeBooleanValue(fieldNode));
                 iterator.remove();
             }
         }
@@ -38,7 +38,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

 /**
  *

@@ -95,7 +95,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("required")) {
-                builder.required(nodeBooleanValue(fieldNode));
+                builder.required(lenientNodeBooleanValue(fieldNode));
                 iterator.remove();
             }
         }
@@ -51,7 +51,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;

 /**
  *

@@ -122,7 +122,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                builder.enabled(nodeBooleanValue(fieldNode));
+                builder.enabled(lenientNodeBooleanValue(fieldNode));
                 iterator.remove();
             } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) {
                 // ignore on old indices, reject on and after 3.0
@@ -44,7 +44,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeTimeValue;

 public class TTLFieldMapper extends MetadataFieldMapper {

@@ -108,7 +108,7 @@ public class TTLFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
+                EnabledAttributeMapper enabledState = lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
                 builder.enabled(enabledState);
                 iterator.remove();
             } else if (fieldName.equals("default")) {
@@ -43,7 +43,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;

 public class TimestampFieldMapper extends MetadataFieldMapper {

@@ -134,7 +134,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
             String fieldName = Strings.toUnderscoreCase(entry.getKey());
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
+                EnabledAttributeMapper enabledState = lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
                 builder.enabled(enabledState);
                 iterator.remove();
             } else if (fieldName.equals("format")) {

@@ -149,7 +149,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
                 }
                 iterator.remove();
             } else if (fieldName.equals("ignore_missing")) {
-                ignoreMissing = nodeBooleanValue(fieldNode);
+                ignoreMissing = lenientNodeBooleanValue(fieldNode);
                 builder.ignoreMissing(ignoreMissing);
                 iterator.remove();
             }
@@ -49,7 +49,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.index.mapper.MapperBuilders.object;

 /**

@@ -191,11 +191,11 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
             if (value.equalsIgnoreCase("strict")) {
                 builder.dynamic(Dynamic.STRICT);
             } else {
-                builder.dynamic(nodeBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE);
+                builder.dynamic(lenientNodeBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE);
             }
             return true;
         } else if (fieldName.equals("enabled")) {
-            builder.enabled(nodeBooleanValue(fieldNode));
+            builder.enabled(lenientNodeBooleanValue(fieldNode));
             return true;
         } else if (fieldName.equals("properties")) {
             if (fieldNode instanceof Collection && ((Collection) fieldNode).isEmpty()) {

@@ -207,7 +207,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
             }
             return true;
         } else if (fieldName.equals("include_in_all")) {
-            builder.includeInAll(nodeBooleanValue(fieldNode));
+            builder.includeInAll(lenientNodeBooleanValue(fieldNode));
             return true;
         }
         return false;

@@ -230,12 +230,12 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
         }
         fieldNode = node.get("include_in_parent");
         if (fieldNode != null) {
-            nestedIncludeInParent = nodeBooleanValue(fieldNode);
+            nestedIncludeInParent = lenientNodeBooleanValue(fieldNode);
             node.remove("include_in_parent");
         }
         fieldNode = node.get("include_in_root");
         if (fieldNode != null) {
-            nestedIncludeInRoot = nodeBooleanValue(fieldNode);
+            nestedIncludeInRoot = lenientNodeBooleanValue(fieldNode);
             node.remove("include_in_root");
         }
         if (nested) {
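The "dynamic" setting in the ObjectMapper hunk above mixes a string special case with the lenient boolean conversion. A self-contained sketch of the assumed parse order (the Dynamic enum mirrors the one in the diff; lenientBooleanValue repeats the assumed semantics from the earlier sketch):

public class DynamicSettingSketch {
    enum Dynamic { TRUE, FALSE, STRICT }

    static Dynamic parseDynamic(Object fieldNode) {
        String value = fieldNode.toString();
        if (value.equalsIgnoreCase("strict")) {
            return Dynamic.STRICT; // "strict" is checked before any boolean coercion
        }
        return lenientBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE;
    }

    // Assumed lenient semantics: "false", "off", "no", "0" are false, else true.
    static boolean lenientBooleanValue(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        String value = node.toString();
        return !(value.equals("false") || value.equals("off")
                || value.equals("no") || value.equals("0"));
    }

    public static void main(String[] args) {
        System.out.println(parseDynamic("strict")); // STRICT
        System.out.println(parseDynamic("no"));     // FALSE under the lenient rules
        System.out.println(parseDynamic(true));     // TRUE
    }
}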
@@ -42,7 +42,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;

 /**

@@ -189,10 +189,10 @@ public class RootObjectMapper extends ObjectMapper {
             }
             return true;
         } else if (fieldName.equals("date_detection")) {
-            ((Builder) builder).dateDetection = nodeBooleanValue(fieldNode);
+            ((Builder) builder).dateDetection = lenientNodeBooleanValue(fieldNode);
             return true;
         } else if (fieldName.equals("numeric_detection")) {
-            ((Builder) builder).numericDetection = nodeBooleanValue(fieldNode);
+            ((Builder) builder).numericDetection = lenientNodeBooleanValue(fieldNode);
             return true;
         }
         return false;
@@ -37,7 +37,11 @@ import java.util.Objects;

 /**
  * A Query that does fuzzy matching for a specific value.
+ *
+ * @deprecated Fuzzy queries are not useful enough. This class will be removed with Elasticsearch 4.0. In most cases you may want to use
+ * a match query with the fuzziness parameter for strings or range queries for numeric and date fields.
  */
+@Deprecated
 public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> implements MultiTermQueryBuilder<FuzzyQueryBuilder> {

     public static final String NAME = "fuzzy";
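The deprecation notice points at match queries with a fuzziness parameter, and at range queries, as replacements. A hedged migration sketch using the Java client API of the same era (field names and values are illustrative; the builder calls are believed to match QueryBuilders of this period but should be checked against the target version):

import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class FuzzyMigrationSketch {
    public static void main(String[] args) {
        // Instead of: new FuzzyQueryBuilder("user", "kimchy")
        QueryBuilder textQuery = QueryBuilders.matchQuery("user", "kimchy")
                .fuzziness(Fuzziness.AUTO);

        // Instead of fuzzy matching on a numeric or date field:
        QueryBuilder numericQuery = QueryBuilders.rangeQuery("age")
                .gte(18)
                .lte(22);

        System.out.println(textQuery);
        System.out.println(numericQuery);
    }
}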
@@ -26,6 +26,11 @@ import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;

+/**
+ * @deprecated Fuzzy queries are not useful enough. This class will be removed with Elasticsearch 4.0. In most cases you may want to use
+ * a match query with the fuzziness parameter for strings or range queries for numeric and date fields.
+ */
+@Deprecated
 public class FuzzyQueryParser implements QueryParser<FuzzyQueryBuilder> {

     public static final ParseField TERM_FIELD = new ParseField("term");
Some files were not shown because too many files have changed in this diff.